github.com/aloncn/graphics-go@v0.0.1/src/runtime/runtime2.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
 * defined constants
 */
const (
	// G status
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	_Gidle            = iota // 0
	_Grunnable               // 1 runnable and on a run queue
	_Grunning                // 2
	_Gsyscall                // 3
	_Gwaiting                // 4
	_Gmoribund_unused        // 5 currently unused, but hardcoded in gdb scripts
	_Gdead                   // 6
	_Genqueue                // 7 Only the Gscanenqueue is used.
	_Gcopystack              // 8 in this state when newstack is moving the stack

	// the following encode that the GC is scanning the stack and what to do when it is done
	_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state
	// _Gscanidle = _Gscan + _Gidle // Not used. Gidle only used with newly malloced gs
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003 When scanning completes make it Gsyscall
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004 When scanning completes make it Gwaiting
	// _Gscanmoribund_unused // not possible
	// _Gscandead            // not possible
	_Gscanenqueue = _Gscan + _Genqueue // When scanning completes make it Grunnable and put on runqueue
)

const (
	// P status
	_Pidle = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
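// Illustrative sketch, not part of the original file: efaceOf reinterprets an
// *interface{} as an *eface, exposing the (type, data) word pair the compiler
// uses to represent an empty interface. The helper name below is hypothetical.
func exampleEfaceWords(x *interface{}) (*_type, unsafe.Pointer) {
	e := efaceOf(x) // pure pointer reinterpretation; nothing is allocated
	return e._type, e.data
}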
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs, Ms, and Ps are always reachable via true pointers in the
// allgs, allm, and allp lists or (during allocation before they reach those lists)
// from stack variables.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
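// Illustrative sketch, not part of the original file: the typed-uintptr
// pattern above in action. Every store below is a plain word write, so no
// write barrier is emitted; the Gs stay reachable through allgs, as the
// comment above requires. The helper name is hypothetical.
func exampleGuintptrPush(head *guintptr, gp *g) {
	for {
		old := *head       // racy read is fine: cas revalidates it
		gp.schedlink = old // uintptr copy, not a pointer write
		var h guintptr
		h.set(gp)
		if head.cas(old, h) { // atomically publish the new list head
			return
		}
	}
}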
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer // this has to be a pointer so that gc scans it
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type sudog struct {
	g           *g
	selectdone  *uint32
	next        *sudog
	prev        *sudog
	elem        unsafe.Pointer // data element
	releasetime int64
	nrelease    int32  // -1 for acquire
	waitlink    *sudog // g.waiting list
}

type gcstats struct {
	// the struct must consist of only uint64's,
	// because it is cast to []uint64.
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

// stkbar records the state of a G's stack barrier.
type stkbar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}
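// Illustrative sketch, not part of the original file: because the bounds are
// exactly [lo, hi), asking whether a stack pointer lies on a given stack is
// plain arithmetic. The helper name is hypothetical.
func exampleOnStack(stk stack, sp uintptr) bool {
	return stk.lo <= sp && sp < stk.hi // half-open interval membership
}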
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic         *_panic // innermost panic - offset known to liblink
	_defer         *_defer // innermost defer
	m              *m      // current m; offset known to arm liblink
	stackAlloc     uintptr // stack allocation is [stack.lo,stack.lo+stackAlloc)
	sched          gobuf
	syscallsp      uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc      uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stkbar         []stkbar       // stack barriers, from low to high (see top of mstkbar.go)
	stkbarPos      uintptr        // index of lowest stack barrier not hit
	stktopsp       uintptr        // expected sp at top of stack, to check in traceback
	param          unsafe.Pointer // passed parameter on wakeup
	atomicstatus   uint32
	stackLock      uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid           int64
	waitsince      int64  // approx time when the g became blocked
	waitreason     string // if status==Gwaiting
	schedlink      guintptr
	preempt        bool   // preemption signal, duplicates stackguard0 = stackpreempt
	paniconfault   bool   // panic (instead of crash) on unexpected fault address
	preemptscan    bool   // preempted g does scan for gc
	gcscandone     bool   // g has scanned stack; protected by _Gscan bit in status
	gcscanvalid    bool   // false at start of gc cycle, true if G has not run since last scan
	throwsplit     bool   // must not split stack
	raceignore     int8   // ignore race detection events
	sysblocktraced bool   // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64  // cputicks when syscall has returned (for tracing)
	sysexitseq     uint64 // trace seq when syscall has returned (for tracing)
	lockedm        *m
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr // pc of go statement that created this goroutine
	startpc        uintptr // pc of goroutine function
	racectx        uintptr
	waiting        *sudog // sudog structures this g is waiting on (that have a valid elem ptr)

	// Per-G gcController state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
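// Illustrative sketch, not part of the original file: the shape of the
// malloc-path debt check that the gcAssistBytes comment describes. The real
// logic lives in mallocgc and the assist code in mgcmark.go; the helper name
// is hypothetical.
func exampleAssistCheck(gp *g, size uintptr) bool {
	gp.gcAssistBytes -= int64(size) // spend credit for this allocation
	return gp.gcAssistBytes < 0     // negative balance: must do scan work
}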
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64     // for debuggers, but offset not hard-coded
	gsignal       *g         // signal-handling g
	sigmask       sigset     // storage for saved signal mask
	tls           [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	id            int32
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	softfloat     int32
	dying         int32
	profilehz     int32
	helpgc        int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	inwb          bool // m is executing a write barrier
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	fastrand      uint32
	ncgocall      uint64 // number of cgo calls in total
	ncgo          int32  // number of cgo calls currently in progress
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	machport      uint32 // return address for mach ipc (os x)
	mcache        *mcache
	lockedg       *g
	createstack   [32]uintptr // stack that created this thread.
	freglo        [16]uint32  // d[i] lsb and f[i]
	freghi        [16]uint32  // d[i] msb and f[i+16]
	fflag         uint32      // floating point compare flags
	locked        uint32      // tracking for lockosthread
	nextwaitm     uintptr     // next m waiting for lock
	gcstats       gcstats
	needextram    bool
	traceback     uint8
	waitunlockf   unsafe.Pointer // todo go func(*g, unsafe.pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32

	//#ifdef GOOS_windows
	thread uintptr // thread handle
	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows
	//#endif

	mOS
}

type p struct {
	lock mutex

	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32   // incremented on every scheduler call
	syscalltick uint32   // incremented on every system call
	m           muintptr // back-link to associated m (nil if idle)
	mcache      *mcache

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue. See the sketch
	// after this struct for the priority rule in isolation.
	runnext guintptr

	// Available G's (status == Gdead)
	gfree    *g
	gfreecnt int32

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	tracebuf traceBufPtr

	palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	gcAssistTime     int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker   guintptr
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	pad [64]byte
}
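// Illustrative sketch, not part of the original file: the runnext priority
// rule described above. The real implementation is runqput in proc.go, which
// also handles local-queue overflow and uses careful atomic orderings that
// this simplified, hypothetical helper omits.
func exampleRunqPutNext(pp *p, gp *g) {
	for {
		old := pp.runnext
		var h guintptr
		h.set(gp)
		if !pp.runnext.cas(old, h) { // install gp as the next G to run
			continue
		}
		if old != 0 {
			// The displaced G falls back to the tail of the local run queue.
			tail := atomic.Load(&pp.runqtail)
			pp.runq[tail%uint32(len(pp.runq))] = old
			atomic.Store(&pp.runqtail, tail+1)
		}
		return
	}
}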
const (
	// The max value of GOMAXPROCS.
	// There are no fundamental restrictions on the value.
	_MaxGomaxprocs = 1 << 8
)

type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen  uint64
	lastpoll uint64

	lock mutex

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mcount       int32    // number of m's that have been created
	maxmcount    int32    // maximum number of m's allowed (or die)

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runqhead guintptr
	runqtail guintptr
	runqsize int32

	// Global cache of dead G's.
	gflock mutex
	gfree  *g
	ngfree int32

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
const (
	_LockExternal = 1
	_LockInternal = 2
)

type sigtabtt struct {
	flags int32
	name  *int8
}

const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigHandling             // our signal handler is registered
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // unblocked in minit
)

// Layout of in-memory per-function information prepared by linker.
// See https://golang.org/s/go12symtab.
// Keep in sync with linker
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args int32 // in/out args size
	_    int32 // previously legacy frame size; kept for layout compatibility

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	nfuncdata int32
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
type itab struct {
	inter  *interfacetype
	_type  *_type
	link   *itab
	bad    int32
	unused int32
	fun    [1]uintptr // variable sized
}
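// Illustrative sketch, not part of the original file: fun is declared with a
// single element, but the full method table is laid out directly after the
// struct, so slot i is reached by pointer arithmetic rather than ordinary
// indexing. The accessor name is hypothetical.
func exampleItabMethod(tab *itab, i int) uintptr {
	base := uintptr(unsafe.Pointer(&tab.fun[0]))
	return *(*uintptr)(unsafe.Pointer(base + uintptr(i)*sys.PtrSize)) // code pointer of method i
}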
// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

/*
 * known to compiler
 */
const (
	_Structrnd = sys.RegSize
)

// startupRandomData holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}

/*
 * deferred subroutine calls
 */
type _defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *funcval
	_panic  *_panic // panic that is running defer
	link    *_defer
}

/*
 * panics
 */
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}

/*
 * stack traces
 */

type stkframe struct {
	fn       *_func     // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

const (
	// The maximum number of frames we print for a traceback
	_TracebackMaxFrames = 100
)

var (
	emptystring string
	allglen     uintptr
	allm        *m
	allp        [_MaxGomaxprocs + 1]*p
	gomaxprocs  int32
	panicking   uint32
	ncpu        int32
	forcegc     forcegcstate
	sched       schedt
	newprocs    int32

	// Information about what cpu features are available.
	// Set on startup in asm_{x86,amd64}.s.
	cpuid_ecx         uint32
	cpuid_edx         uint32
	lfenceBeforeRdtsc bool
	support_avx       bool
	support_avx2      bool

	goarm uint8 // set by cmd/link on arm systems
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)

/*
 * mutual exclusion locks. in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Mutex is unlocked (no need to initialize each lock).
 */

/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return. future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened. if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
// bool runtime·notetsleep(Note*, int64);  // false - timeout
// bool runtime·notetsleepg(Note*, int64); // false - timeout
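// Illustrative sketch, not part of the original file: the one-time event
// protocol spelled out above, using noteclear/notewakeup/notetsleepg from
// lock_futex.go and lock_sema.go. The function name is hypothetical.
func exampleNoteHandshake(n *note) {
	noteclear(n) // a note must be initialized before first use
	go func() {
		notewakeup(n) // exactly one wakeup per cleared note
	}()
	notetsleepg(n, -1) // user-g variant; ns < 0 means no timeout
}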
/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */

// for mmap, we only pass the lower 32 bits of file offset to the
// assembly routine; the higher bits (if required), should be provided
// by the assembly routine as 0.
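// Illustrative sketch, not part of the original file: the lock-free stack
// contract described above, using lfstackpush/lfstackpop from lfstack.go.
// The function name is hypothetical.
func exampleLfstack(nodes []lfnode) {
	var head uint64 // zero value is an empty stack; no constructor needed
	for i := range nodes {
		lfstackpush(&head, &nodes[i])
	}
	for p := lfstackpop(&head); p != nil; p = lfstackpop(&head) {
		_ = (*lfnode)(p) // pop returns unsafe.Pointer; callers convert back
	}
}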