github.com/reiver/go@v0.0.0-20150109200633-1d0c7792f172/src/runtime/runtime2.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import "unsafe" 8 9 /* 10 * defined constants 11 */ 12 const ( 13 // G status 14 // 15 // If you add to this list, add to the list 16 // of "okay during garbage collection" status 17 // in mgc0.c too. 18 _Gidle = iota // 0 19 _Grunnable // 1 runnable and on a run queue 20 _Grunning // 2 21 _Gsyscall // 3 22 _Gwaiting // 4 23 _Gmoribund_unused // 5 currently unused, but hardcoded in gdb scripts 24 _Gdead // 6 25 _Genqueue // 7 Only the Gscanenqueue is used. 26 _Gcopystack // 8 in this state when newstack is moving the stack 27 // the following encode that the GC is scanning the stack and what to do when it is done 28 _Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state, 29 // _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs 30 _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning complets make Grunnable (it is already on run queue) 31 _Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack. 32 _Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make is Gsyscall 33 _Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting 34 // _Gscanmoribund_unused, // not possible 35 // _Gscandead, // not possible 36 _Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue 37 ) 38 39 const ( 40 // P status 41 _Pidle = iota 42 _Prunning 43 _Psyscall 44 _Pgcstop 45 _Pdead 46 ) 47 48 // The next line makes 'go generate' write the zgen_*.go files with 49 // per-OS and per-arch information, including constants 50 // named goos_$GOOS and goarch_$GOARCH for every 51 // known GOOS and GOARCH. The constant is 1 on the 52 // current system, 0 otherwise; multiplying by them is 53 // useful for defining GOOS- or GOARCH-specific constants. 54 //go:generate go run gengoos.go 55 56 type mutex struct { 57 // Futex-based impl treats it as uint32 key, 58 // while sema-based impl as M* waitm. 59 // Used to be a union, but unions break precise GC. 60 key uintptr 61 } 62 63 type note struct { 64 // Futex-based impl treats it as uint32 key, 65 // while sema-based impl as M* waitm. 66 // Used to be a union, but unions break precise GC. 67 key uintptr 68 } 69 70 type _string struct { 71 str *byte 72 len int 73 } 74 75 type funcval struct { 76 fn uintptr 77 // variable-size, fn-specific data here 78 } 79 80 type iface struct { 81 tab *itab 82 data unsafe.Pointer 83 } 84 85 type eface struct { 86 _type *_type 87 data unsafe.Pointer 88 } 89 90 type slice struct { 91 array *byte // actual data 92 len uint // number of elements 93 cap uint // allocated number of elements 94 } 95 96 // A guintptr holds a goroutine pointer, but typed as a uintptr 97 // to bypass write barriers. It is used in the Gobuf goroutine state. 98 // 99 // The Gobuf.g goroutine pointer is almost always updated by assembly code. 100 // In one of the few places it is updated by Go code - func save - it must be 101 // treated as a uintptr to avoid a write barrier being emitted at a bad time. 102 // Instead of figuring out how to emit the write barriers missing in the 103 // assembly manipulation, we change the type of the field to uintptr, 104 // so that it does not require write barriers at all. 105 // 106 // Goroutine structs are published in the allg list and never freed. 107 // That will keep the goroutine structs from being collected. 108 // There is never a time that Gobuf.g's contain the only references 109 // to a goroutine: the publishing of the goroutine in allg comes first. 110 // Goroutine pointers are also kept in non-GC-visible places like TLS, 111 // so I can't see them ever moving. If we did want to start moving data 112 // in the GC, we'd need to allocate the goroutine structs from an 113 // alternate arena. Using guintptr doesn't make that problem any worse. 114 type guintptr uintptr 115 116 func (gp guintptr) ptr() *g { 117 return (*g)(unsafe.Pointer(gp)) 118 } 119 120 type gobuf struct { 121 // The offsets of sp, pc, and g are known to (hard-coded in) libmach. 122 sp uintptr 123 pc uintptr 124 g guintptr 125 ctxt unsafe.Pointer // this has to be a pointer so that gc scans it 126 ret uintreg 127 lr uintptr 128 } 129 130 // Known to compiler. 131 // Changes here must also be made in src/cmd/gc/select.c's selecttype. 132 type sudog struct { 133 g *g 134 selectdone *uint32 135 next *sudog 136 prev *sudog 137 elem unsafe.Pointer // data element 138 releasetime int64 139 nrelease int32 // -1 for acquire 140 waitlink *sudog // g.waiting list 141 } 142 143 type gcstats struct { 144 // the struct must consist of only uint64's, 145 // because it is casted to uint64[]. 146 nhandoff uint64 147 nhandoffcnt uint64 148 nprocyield uint64 149 nosyield uint64 150 nsleep uint64 151 } 152 153 type libcall struct { 154 fn uintptr 155 n uintptr // number of parameters 156 args uintptr // parameters 157 r1 uintptr // return values 158 r2 uintptr 159 err uintptr // error number 160 } 161 162 // describes how to handle callback 163 type wincallbackcontext struct { 164 gobody unsafe.Pointer // go function to call 165 argsize uintptr // callback arguments size (in bytes) 166 restorestack uintptr // adjust stack on return by (in bytes) (386 only) 167 cleanstack bool 168 } 169 170 // Stack describes a Go execution stack. 171 // The bounds of the stack are exactly [lo, hi), 172 // with no implicit data structures on either side. 173 type stack struct { 174 lo uintptr 175 hi uintptr 176 } 177 178 type g struct { 179 // Stack parameters. 180 // stack describes the actual stack memory: [stack.lo, stack.hi). 181 // stackguard0 is the stack pointer compared in the Go stack growth prologue. 182 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption. 183 // stackguard1 is the stack pointer compared in the C stack growth prologue. 184 // It is stack.lo+StackGuard on g0 and gsignal stacks. 185 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash). 186 stack stack // offset known to runtime/cgo 187 stackguard0 uintptr // offset known to liblink 188 stackguard1 uintptr // offset known to liblink 189 190 _panic *_panic // innermost panic - offset known to liblink 191 _defer *_defer // innermost defer 192 sched gobuf 193 syscallsp uintptr // if status==gsyscall, syscallsp = sched.sp to use during gc 194 syscallpc uintptr // if status==gsyscall, syscallpc = sched.pc to use during gc 195 param unsafe.Pointer // passed parameter on wakeup 196 atomicstatus uint32 197 goid int64 198 waitsince int64 // approx time when the g become blocked 199 waitreason string // if status==gwaiting 200 schedlink *g 201 issystem bool // do not output in stack dump, ignore in deadlock detector 202 preempt bool // preemption signal, duplicates stackguard0 = stackpreempt 203 paniconfault bool // panic (instead of crash) on unexpected fault address 204 preemptscan bool // preempted g does scan for gc 205 gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle 206 throwsplit bool // must not split stack 207 raceignore int8 // ignore race detection events 208 m *m // for debuggers, but offset not hard-coded 209 lockedm *m 210 sig uint32 211 writebuf []byte 212 sigcode0 uintptr 213 sigcode1 uintptr 214 sigpc uintptr 215 gopc uintptr // pc of go statement that created this goroutine 216 racectx uintptr 217 waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr) 218 } 219 220 type mts struct { 221 tv_sec int64 222 tv_nsec int64 223 } 224 225 type mscratch struct { 226 v [6]uintptr 227 } 228 229 type m struct { 230 g0 *g // goroutine with scheduling stack 231 morebuf gobuf // gobuf arg to morestack 232 233 // Fields not known to debuggers. 234 procid uint64 // for debuggers, but offset not hard-coded 235 gsignal *g // signal-handling g 236 tls [4]uintptr // thread-local storage (for x86 extern register) 237 mstartfn unsafe.Pointer // todo go func() 238 curg *g // current running goroutine 239 caughtsig *g // goroutine running during fatal signal 240 p *p // attached p for executing go code (nil if not executing go code) 241 nextp *p 242 id int32 243 mallocing int32 244 throwing int32 245 gcing int32 246 locks int32 247 softfloat int32 248 dying int32 249 profilehz int32 250 helpgc int32 251 spinning bool // m is out of work and is actively looking for work 252 blocked bool // m is blocked on a note 253 inwb bool // m is executing a write barrier 254 printlock int8 255 fastrand uint32 256 ncgocall uint64 // number of cgo calls in total 257 ncgo int32 // number of cgo calls currently in progress 258 cgomal *cgomal 259 park note 260 alllink *m // on allm 261 schedlink *m 262 machport uint32 // return address for mach ipc (os x) 263 mcache *mcache 264 lockedg *g 265 createstack [32]uintptr // stack that created this thread. 266 freglo [16]uint32 // d[i] lsb and f[i] 267 freghi [16]uint32 // d[i] msb and f[i+16] 268 fflag uint32 // floating point compare flags 269 locked uint32 // tracking for lockosthread 270 nextwaitm *m // next m waiting for lock 271 waitsema uintptr // semaphore for parking on locks 272 waitsemacount uint32 273 waitsemalock uint32 274 gcstats gcstats 275 needextram bool 276 traceback uint8 277 waitunlockf unsafe.Pointer // todo go func(*g, unsafe.pointer) bool 278 waitlock unsafe.Pointer 279 //#ifdef GOOS_windows 280 thread uintptr // thread handle 281 // these are here because they are too large to be on the stack 282 // of low-level NOSPLIT functions. 283 libcall libcall 284 libcallpc uintptr // for cpu profiler 285 libcallsp uintptr 286 libcallg *g 287 //#endif 288 //#ifdef GOOS_solaris 289 perrno *int32 // pointer to tls errno 290 // these are here because they are too large to be on the stack 291 // of low-level NOSPLIT functions. 292 //LibCall libcall; 293 ts mts 294 scratch mscratch 295 //#endif 296 //#ifdef GOOS_plan9 297 notesig *int8 298 errstr *byte 299 //#endif 300 } 301 302 type p struct { 303 lock mutex 304 305 id int32 306 status uint32 // one of pidle/prunning/... 307 link *p 308 schedtick uint32 // incremented on every scheduler call 309 syscalltick uint32 // incremented on every system call 310 m *m // back-link to associated m (nil if idle) 311 mcache *mcache 312 deferpool [5]*_defer // pool of available defer structs of different sizes (see panic.c) 313 314 // Cache of goroutine ids, amortizes accesses to runtimeĀ·sched.goidgen. 315 goidcache uint64 316 goidcacheend uint64 317 318 // Queue of runnable goroutines. 319 runqhead uint32 320 runqtail uint32 321 runq [256]*g 322 323 // Available G's (status == Gdead) 324 gfree *g 325 gfreecnt int32 326 327 pad [64]byte 328 } 329 330 const ( 331 // The max value of GOMAXPROCS. 332 // There are no fundamental restrictions on the value. 333 _MaxGomaxprocs = 1 << 8 334 ) 335 336 type schedt struct { 337 lock mutex 338 339 goidgen uint64 340 341 midle *m // idle m's waiting for work 342 nmidle int32 // number of idle m's waiting for work 343 nmidlelocked int32 // number of locked m's waiting for work 344 mcount int32 // number of m's that have been created 345 maxmcount int32 // maximum number of m's allowed (or die) 346 347 pidle *p // idle p's 348 npidle uint32 349 nmspinning uint32 350 351 // Global runnable queue. 352 runqhead *g 353 runqtail *g 354 runqsize int32 355 356 // Global cache of dead G's. 357 gflock mutex 358 gfree *g 359 ngfree int32 360 361 gcwaiting uint32 // gc is waiting to run 362 stopwait int32 363 stopnote note 364 sysmonwait uint32 365 sysmonnote note 366 lastpoll uint64 367 368 profilehz int32 // cpu profiling rate 369 } 370 371 // The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread. 372 // The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active. 373 // External locks are not recursive; a second lock is silently ignored. 374 // The upper bits of m->lockedcount record the nesting depth of calls to lockOSThread 375 // (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal). 376 // Internal locks can be recursive. For instance, a lock for cgo can occur while the main 377 // goroutine is holding the lock during the initialization phase. 378 const ( 379 _LockExternal = 1 380 _LockInternal = 2 381 ) 382 383 type sigtabtt struct { 384 flags int32 385 name *int8 386 } 387 388 const ( 389 _SigNotify = 1 << 0 // let signal.Notify have signal, even if from kernel 390 _SigKill = 1 << 1 // if signal.Notify doesn't take it, exit quietly 391 _SigThrow = 1 << 2 // if signal.Notify doesn't take it, exit loudly 392 _SigPanic = 1 << 3 // if the signal is from the kernel, panic 393 _SigDefault = 1 << 4 // if the signal isn't explicitly requested, don't monitor it 394 _SigHandling = 1 << 5 // our signal handler is registered 395 _SigIgnored = 1 << 6 // the signal was ignored before we registered for it 396 _SigGoExit = 1 << 7 // cause all runtime procs to exit (only used on Plan 9). 397 _SigSetStack = 1 << 8 // add SA_ONSTACK to libc handler 398 ) 399 400 // Layout of in-memory per-function information prepared by linker 401 // See http://golang.org/s/go12symtab. 402 // Keep in sync with linker and with ../../libmach/sym.c 403 // and with package debug/gosym and with symtab.go in package runtime. 404 type _func struct { 405 entry uintptr // start pc 406 nameoff int32 // function name 407 408 args int32 // in/out args size 409 frame int32 // legacy frame size; use pcsp if possible 410 411 pcsp int32 412 pcfile int32 413 pcln int32 414 npcdata int32 415 nfuncdata int32 416 } 417 418 // layout of Itab known to compilers 419 // allocated in non-garbage-collected memory 420 type itab struct { 421 inter *interfacetype 422 _type *_type 423 link *itab 424 bad int32 425 unused int32 426 fun [1]uintptr // variable sized 427 } 428 429 // Lock-free stack node. 430 // // Also known to export_test.go. 431 type lfnode struct { 432 next uint64 433 pushcnt uintptr 434 } 435 436 // Parallel for descriptor. 437 type parfor struct { 438 body unsafe.Pointer // go func(*parfor, uint32), executed for each element 439 done uint32 // number of idle threads 440 nthr uint32 // total number of threads 441 nthrmax uint32 // maximum number of threads 442 thrseq uint32 // thread id sequencer 443 cnt uint32 // iteration space [0, cnt) 444 ctx unsafe.Pointer // arbitrary user context 445 wait bool // if true, wait while all threads finish processing, 446 // otherwise parfor may return while other threads are still working 447 thr *parforthread // array of thread descriptors 448 pad uint32 // to align parforthread.pos for 64-bit atomic operations 449 // stats 450 nsteal uint64 451 nstealcnt uint64 452 nprocyield uint64 453 nosyield uint64 454 nsleep uint64 455 } 456 457 // Track memory allocated by code not written in Go during a cgo call, 458 // so that the garbage collector can see them. 459 type cgomal struct { 460 next *cgomal 461 alloc unsafe.Pointer 462 } 463 464 // Indicates to write barrier and sychronization task to preform. 465 const ( 466 _GCoff = iota // GC not running, write barrier disabled 467 _GCquiesce // unused state 468 _GCstw // unused state 469 _GCscan // GC collecting roots into workbufs, write barrier disabled 470 _GCmark // GC marking from workbufs, write barrier ENABLED 471 _GCmarktermination // GC mark termination: allocate black, P's help GC, write barrier ENABLED 472 _GCsweep // GC mark completed; sweeping in background, write barrier disabled 473 ) 474 475 type forcegcstate struct { 476 lock mutex 477 g *g 478 idle uint32 479 } 480 481 var gcphase uint32 482 483 /* 484 * known to compiler 485 */ 486 const ( 487 _Structrnd = regSize 488 ) 489 490 // startup_random_data holds random bytes initialized at startup. These come from 491 // the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go). 492 var startupRandomData []byte 493 494 // extendRandom extends the random numbers in r[:n] to the whole slice r. 495 // Treats n<0 as n==0. 496 func extendRandom(r []byte, n int) { 497 if n < 0 { 498 n = 0 499 } 500 for n < len(r) { 501 // Extend random bits using hash function & time seed 502 w := n 503 if w > 16 { 504 w = 16 505 } 506 h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w)) 507 for i := 0; i < ptrSize && n < len(r); i++ { 508 r[n] = byte(h) 509 n++ 510 h >>= 8 511 } 512 } 513 } 514 515 /* 516 * deferred subroutine calls 517 */ 518 type _defer struct { 519 siz int32 520 started bool 521 sp uintptr // sp at time of defer 522 pc uintptr 523 fn *funcval 524 _panic *_panic // panic that is running defer 525 link *_defer 526 } 527 528 /* 529 * panics 530 */ 531 type _panic struct { 532 argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink 533 arg interface{} // argument to panic 534 link *_panic // link to earlier panic 535 recovered bool // whether this panic is over 536 aborted bool // the panic was aborted 537 } 538 539 /* 540 * stack traces 541 */ 542 543 type stkframe struct { 544 fn *_func // function being run 545 pc uintptr // program counter within fn 546 continpc uintptr // program counter where execution can continue, or 0 if not 547 lr uintptr // program counter at caller aka link register 548 sp uintptr // stack pointer at pc 549 fp uintptr // stack pointer at caller aka frame pointer 550 varp uintptr // top of local variables 551 argp uintptr // pointer to function arguments 552 arglen uintptr // number of bytes at argp 553 argmap *bitvector // force use of this argmap 554 } 555 556 const ( 557 _TraceRuntimeFrames = 1 << 0 // include frames for internal runtime functions. 558 _TraceTrap = 1 << 1 // the initial PC, SP are from a trap, not a return PC from a call 559 ) 560 561 const ( 562 // The maximum number of frames we print for a traceback 563 _TracebackMaxFrames = 100 564 ) 565 566 var ( 567 emptystring string 568 allg **g 569 allglen uintptr 570 lastg *g 571 allm *m 572 allp [_MaxGomaxprocs + 1]*p 573 gomaxprocs int32 574 needextram uint32 575 panicking uint32 576 goos *int8 577 ncpu int32 578 iscgo bool 579 cpuid_ecx uint32 580 cpuid_edx uint32 581 signote note 582 forcegc forcegcstate 583 sched schedt 584 newprocs int32 585 ) 586 587 /* 588 * mutual exclusion locks. in the uncontended case, 589 * as fast as spin locks (just a few user-level instructions), 590 * but on the contention path they sleep in the kernel. 591 * a zeroed Mutex is unlocked (no need to initialize each lock). 592 */ 593 594 /* 595 * sleep and wakeup on one-time events. 596 * before any calls to notesleep or notewakeup, 597 * must call noteclear to initialize the Note. 598 * then, exactly one thread can call notesleep 599 * and exactly one thread can call notewakeup (once). 600 * once notewakeup has been called, the notesleep 601 * will return. future notesleep will return immediately. 602 * subsequent noteclear must be called only after 603 * previous notesleep has returned, e.g. it's disallowed 604 * to call noteclear straight after notewakeup. 605 * 606 * notetsleep is like notesleep but wakes up after 607 * a given number of nanoseconds even if the event 608 * has not yet happened. if a goroutine uses notetsleep to 609 * wake up early, it must wait to call noteclear until it 610 * can be sure that no other goroutine is calling 611 * notewakeup. 612 * 613 * notesleep/notetsleep are generally called on g0, 614 * notetsleepg is similar to notetsleep but is called on user g. 615 */ 616 // bool runtimeĀ·notetsleep(Note*, int64); // false - timeout 617 // bool runtimeĀ·notetsleepg(Note*, int64); // false - timeout 618 619 /* 620 * Lock-free stack. 621 * Initialize uint64 head to 0, compare with 0 to test for emptiness. 622 * The stack does not keep pointers to nodes, 623 * so they can be garbage collected if there are no other pointers to nodes. 624 */ 625 626 /* 627 * Parallel for over [0, n). 628 * body() is executed for each iteration. 629 * nthr - total number of worker threads. 630 * ctx - arbitrary user context. 631 * if wait=true, threads return from parfor() when all work is done; 632 * otherwise, threads can return while other threads are still finishing processing. 633 */ 634 635 // for mmap, we only pass the lower 32 bits of file offset to the 636 // assembly routine; the higher bits (if required), should be provided 637 // by the assembly routine as 0.