github.com/miolini/go@v0.0.0-20160405192216-fca68c8cb408/src/runtime/runtime2.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
 * defined constants
 */
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus &^ _Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)
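// Illustrative sketch, not part of the original file: since each scan
// state is formed by adding _Gscan to a base state, clearing the bit
// recovers the state the goroutine will return to, as described above.
// The helper name baseOf is hypothetical:
//
//	func baseOf(status uint32) uint32 {
//		return status &^ _Gscan // e.g. _Gscanwaiting -> _Gwaiting
//	}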
const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
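// Illustrative sketch, not part of the original file: efaceOf
// reinterprets an *interface{} as its two-word runtime representation,
// so runtime code can read the type and data words directly without
// allocating:
//
//	var x interface{} = 42
//	e := efaceOf(&x)
//	// e._type now describes int; e.data points at the boxed 42.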
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
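// Illustrative sketch, not part of the original file: one use of
// guintptr.cas is publishing a g on a shared slot or list without a
// write barrier. A hypothetical lock-free push using the schedlink
// field declared on g below (the real scheduler guards its global
// queue with sched.lock; this is only to show the cas pattern):
//
//	func push(head *guintptr, gp *g) {
//		for {
//			old := *head
//			gp.schedlink = old
//			if head.cas(old, guintptr(unsafe.Pointer(gp))) {
//				return
//			}
//		}
//	}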
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this.

	g          *g
	selectdone *uint32 // CAS to 1 to win select race (may point to stack)
	next       *sudog
	prev       *sudog
	elem       unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// waitlink is only accessed by g.

	releasetime int64
	ticket      uint32
	waitlink    *sudog // g.waiting list
	c           *hchan // channel
}

type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is cast to a []uint64.
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// stkbar records the state of a G's stack barrier.
type stkbar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	stackAlloc     uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar         []stkbar       // stack barriers, from low to high (see top of mstkbar.go)
	stkbarPos      uintptr        // index of lowest stack barrier not hit
	stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool   // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool   // panic (instead of crash) on unexpected fault address
	preemptscan    bool   // preempted g does scan for gc
	gcscandone     bool   // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool   // false at start of gc cycle, true if G has not run since last scan
	throwsplit     bool   // must not split stack
	raceignore     int8   // ignore race detection events
	sysblocktraced bool   // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	sysexitseq     uint64 // trace seq when syscall has returned (for tracing)
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order

	// Per-G gcController state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
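// Illustrative sketch, not part of the original file: the stackguard0
// comment above means preemption is requested by poisoning the guard so
// that the next function prologue's stack check fails and traps into
// the runtime. Roughly, assuming the stackPreempt constant from
// stack.go:
//
//	gp.preempt = true
//	gp.stackguard0 = stackPreempt // prologue's SP comparison now always fails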
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	sigmask       sigset     // storage for saved signal mask
	tls           [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	fastrand      uint32
	ncgocall      uint64      // number of cgo calls in total
	ncgo          int32       // number of cgo calls currently in progress
	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread.
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // TODO: go func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows
	//#endif
	mOS
}
type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   guintptr
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [64]byte
}
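// Illustrative sketch, not part of the original file: runqhead and
// runqtail index the fixed-size runq ring modulo its length, and
// runnext is consulted before the ring. A simplified, non-atomic
// version of the lookup done by runqget in proc.go:
//
//	if next := _p_.runnext; next != 0 && _p_.runnext.cas(next, 0) {
//		return next.ptr()
//	}
//	if _p_.runqhead != _p_.runqtail {
//		gp := _p_.runq[_p_.runqhead%uint32(len(_p_.runq))].ptr()
//		_p_.runqhead++ // the real code uses atomic loads and cas here
//		return gp
//	}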
const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)

type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)
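// Illustrative sketch, not part of the original file: with the encoding
// above, the external flag lives in the low bit and the internal
// nesting count in the remaining bits, so a hypothetical decode is:
//
//	external := mp.locked&_LockExternal != 0 // any LockOSThread call active
//	depth := mp.locked / _LockInternal       // nesting depth of lockOSThread calls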
type sigtabtt struct {
	flags int32
	name  *int8
}

const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigHandling             // our signal handler is registered
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // unblocked in minit
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args int32 // in/out args size
	_    int32 // previously legacy frame size; kept for layout compatibility

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptypestructs.
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

/*
 * known to compiler
 */
const (
	_Structrnd = sys.RegSize
)

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}

/*
 * deferred subroutine calls
 */
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}

/*
 * panics
 */
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

/*
 * stack traces
 */

type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

const (
	// The maximum number of frames we print for a traceback
	_TracebackMaxFrames = 100
)

var (
	emptystring string
	allglen     uintptr
	allm        *m
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	ncpu        int32
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	cpuid_ebx7        uint32
	lfenceBeforeRdtsc bool
	support_avx       bool
	support_avx2      bool

	goarm uint8 // set by cmd/link on arm systems
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

/*
 * mutual exclusion locks. in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Mutex is unlocked (no need to initialize each lock).
 */
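// Illustrative sketch, not part of the original file: as the comment
// above says, a zeroed mutex is already unlocked, so runtime code can
// guard a critical section with the lock/unlock functions directly:
//
//	var somelock mutex // zero value is ready to use
//
//	lock(&somelock)
//	// ... touch state shared between Ms ...
//	unlock(&somelock)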
/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return. future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened. if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
// bool runtime·notetsleep(Note*, int64);  // false - timeout
// bool runtime·notetsleepg(Note*, int64); // false - timeout

/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */

// for mmap, we only pass the lower 32 bits of file offset to the
// assembly routine; the higher bits (if required), should be provided
// by the assembly routine as 0.
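// Illustrative sketch, not part of the original file: the one-time
// event protocol described in the note comment above, with one sleeper
// and one waker:
//
//	var done note
//	noteclear(&done) // initialize before any notesleep/notewakeup
//
//	// sleeping thread (on g0):
//	notesleep(&done) // blocks until notewakeup is called
//
//	// waking thread (exactly once per noteclear):
//	notewakeup(&done)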