github.com/fjballest/golang@v0.0.0-20151209143359-e4c5fe594ca8/src/runtime/runtime2.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
 * defined constants
 */
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle            = iota // 0
	_Grunnable               // 1 runnable and on a run queue
	_Grunning                // 2
	_Gsyscall                // 3
	_Gwaiting                // 4
	_Gmoribund_unused        // 5 currently unused, but hardcoded in gdb scripts
	_Gdead                   // 6
	_Genqueue                // 7 only the Gscanenqueue form is used
	_Gcopystack              // 8 in this state when newstack is moving the stack
	// The following states encode that the GC is scanning the stack and what to do when it is done.
	_Gscan = 0x1000 // atomicstatus&^_Gscan = the non-scan state
	// _Gscanidle =     _Gscan + _Gidle,      // Not used. Gidle only used with newly malloced gs
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 when scanning completes make Grunnable (it is already on a run queue)
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002 used to tell the preemption newstack routine to scan the preempted stack
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003 when scanning completes make it Gsyscall
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004 when scanning completes make it Gwaiting
	// _Gscanmoribund_unused,               // not possible
	// _Gscandead,                          // not possible
	_Gscanenqueue = _Gscan + _Genqueue // when scanning completes make it Grunnable and put it on the run queue
)
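
// An illustrative note (not part of the original source): the _Gscan bit
// composes with the non-scan states by addition, so the underlying state can
// be recovered by masking the bit back out. A minimal sketch:
//
//	status := uint32(_Gscanrunnable) // 0x1001
//	scanning := status&_Gscan != 0   // true: the GC is scanning this stack
//	base := status &^ _Gscan         // _Grunnable, the state to restore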

const (
	// P status
	_Pidle    = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
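
// For illustration only (not part of the original source): efaceOf
// reinterprets an *interface{} as its runtime representation, exposing the
// type and data words without any allocation or copying. A minimal sketch:
//
//	var i interface{} = 42
//	e := efaceOf(&i)
//	// e._type now describes int; e.data points at the boxed value.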

// The guintptr, muintptr, and puintptr types are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
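
// An illustrative sketch (not part of the original source) of how these
// helpers are used: storing through a guintptr is a plain integer write, so
// the compiler emits no write barrier, unlike an assignment to a *g field.
//
//	var link guintptr
//	link.set(gp)     // plain uintptr store, no write barrier
//	g2 := link.ptr() // recover the *g for ordinary use
//	swapped := link.cas(guintptr(unsafe.Pointer(gp)), 0) // atomic, still barrier-free
//
// This is safe only because every g stays reachable through allg, as the
// comment above explains.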

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
	g           *g
	selectdone  *uint32
	next        *sudog
	prev        *sudog
	elem        unsafe.Pointer // data element
	releasetime int64
	nrelease    int32  // -1 for acquire
	waitlink    *sudog // g.waiting list
}

type gcstats struct {
	// The struct must consist of only uint64s,
	// because it is cast to a []uint64.
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// stkbar records the state of a G's stack barrier.
type stkbar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	stackAlloc     uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar         []stkbar       // stack barriers, from low to high
	stkbarPos      uintptr        // index of lowest stack barrier not hit
	stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool   // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool   // panic (instead of crash) on unexpected fault address
	preemptscan    bool   // preempted g does scan for gc
	gcscandone     bool   // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool   // false at start of gc cycle, true if G has not run since last scan
	throwsplit     bool   // must not split stack
	raceignore     int8   // ignore race detection events
	sysblocktraced bool   // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	sysexitseq     uint64 // trace seq when syscall has returned (for tracing)
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog // sudog structures this g is waiting on (that have a valid elem ptr)

	// Per-G gcController state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	// (An illustrative sketch follows this struct.)
	gcAssistBytes int64
}
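
// An illustrative sketch (not part of the original source) of the
// gcAssistBytes accounting described above; assistWorkPerByte stands in for
// the assist ratio maintained by the GC controller:
//
//	gp.gcAssistBytes -= int64(size) // malloc spends credit
//	if gp.gcAssistBytes < 0 {
//		// In debt: convert bytes of debt into scan work to perform.
//		debt := -gp.gcAssistBytes
//		scanWork := int64(assistWorkPerByte * float64(debt))
//		// ... do scanWork units of GC work, then credit the bytes back ...
//	}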

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	sigmask       sigset     // storage for saved signal mask
	tls           [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	printlock     int8
	fastrand      uint32
	ncgocall      uint64 // number of cgo calls in total
	ncgo          int32  // number of cgo calls currently in progress
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	machport      uint32 // return address for mach ipc (os x)
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows
	//#endif
	mOS
}

type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	// (An illustrative sketch follows this struct.)
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   *g
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [64]byte
}
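
// An illustrative sketch (not part of the original source) of the runnext
// behavior described in the struct above, in the style of runqput:
//
//	// A G readied by the running G tries to displace runnext first.
//	oldnext := _p_.runnext
//	if _p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
//		// The displaced G, if any, goes to the tail of runq, while gp
//		// inherits the remainder of the current time slice.
//	}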

const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)

type schedt struct {
	lock mutex

	goidgen uint64

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // limited to [0, 2^31-1]

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note
	lastpoll   uint64

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (_LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by _LockInternal), popped by unlockOSThread (counting down by _LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
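
// For illustration only (not part of the original source): a sketch of how
// the two counts share the m.locked word, given the constants above:
//
//	mp.locked |= _LockExternal // LockOSThread sets the boolean low bit
//	mp.locked += _LockInternal // lockOSThread bumps the nesting depth
//	mp.locked -= _LockInternal // unlockOSThread pops one level
//	wired := mp.locked != 0    // the thread stays wired while any state remains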

type sigtabtt struct {
	flags int32
	name  *int8
}

const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigHandling             // our signal handler is registered
	_SigIgnored              // the signal was ignored before we registered for it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9)
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // unblocked in minit
)

// Layout of in-memory per-function information prepared by the linker.
// See https://golang.org/s/go12symtab.
// Keep in sync with the linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args int32 // in/out args size
	_    int32 // Previously: legacy frame size. TODO: Remove this.

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

/*
 * known to compiler
 */
const (
	_Structrnd = sys.RegSize
)

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
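
// For illustration only (not part of the original source): a sketch of how
// extendRandom stretches a short seed, such as the bytes of
// startupRandomData, across a larger buffer:
//
//	var buf [64]byte
//	n := copy(buf[:], startupRandomData) // however many seed bytes exist
//	extendRandom(buf[:], n)              // hash-extends to fill the rest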

/*
 * deferred subroutine calls
 */
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}

/*
 * panics
 */
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

/*
 * stack traces
 */

type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

const (
	// The maximum number of frames we print for a traceback
	_TracebackMaxFrames = 100
)

var (
	emptystring string
	allglen     uintptr
	allm        *m
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	ncpu        int32
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	lfenceBeforeRdtsc bool
	support_avx       bool
	support_avx2      bool

	goarm uint8 // set by cmd/link on arm systems
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

/*
 * mutual exclusion locks.  in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed mutex is unlocked (no need to initialize each lock).
 */
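
// For illustration only (not part of the original source): the runtime's
// internal locking idiom under the contract above. A zeroed mutex is ready
// to use:
//
//	var l mutex
//	lock(&l)
//	// ... critical section ...
//	unlock(&l)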

/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return.  future notesleep calls will return immediately.
 * a subsequent noteclear must be called only after
 * a previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened.  if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
// bool	runtime·notetsleep(Note*, int64);  // false - timeout
// bool	runtime·notetsleepg(Note*, int64);  // false - timeout
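
// For illustration only (not part of the original source): the one-shot
// handshake described above, with exactly one sleeper and one waker:
//
//	var n note
//	noteclear(&n) // initialize before any sleep/wakeup
//	// thread A:
//	notesleep(&n) // blocks until the wakeup below
//	// thread B:
//	notewakeup(&n) // releases the sleeper; later notesleeps return at once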

/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
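
// For illustration only (not part of the original source): the lock-free
// stack contract above, using the runtime's lfstackpush/lfstackpop on a
// zero-initialized head word:
//
//	var head uint64          // 0 means empty
//	lfstackpush(&head, node) // node is a *lfnode embedded in its element
//	p := lfstackpop(&head)   // unsafe.Pointer to a node, nil when empty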

// For mmap, we only pass the lower 32 bits of the file offset to the
// assembly routine; the higher bits (if required) should be provided
// by the assembly routine as 0.