// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package g

import (
	"unsafe"

	"github.com/sitano/gsysint/sys"
)

// Status constants mirrored from the Go runtime (runtime/runtime2.go).
// The numeric values must stay in sync with the runtime version this
// package targets.
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P.
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan         = 0x1000
	_Gscanrunnable = _Gscan + _Grunnable // 0x1001
	_Gscanrunning  = _Gscan + _Grunning  // 0x1002
	_Gscansyscall  = _Gscan + _Gsyscall  // 0x1003
	_Gscanwaiting  = _Gscan + _Gwaiting  // 0x1004
)

const (
	// P status
	_Pidle    = iota
	_Prunning // Only this P is allowed to change from _Prunning.
	_Psyscall
	_Pgcstop
	_Pdead
)

// States stored in Mutex.Key by the futex-based lock implementation.
const (
	MutexUnlocked = 0
	MutexLocked   = 1
	MutexSleeping = 2
)
// Mutex is a mutual exclusion lock. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type Mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	Key uintptr
}

// Note provides sleep and wakeup on one-time events.
// Before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// Then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// Once notewakeup has been called, the notesleep
// will return. Future notesleep will return immediately.
// Subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. If a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type Note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	Key uintptr
}

// FuncVal is the runtime layout of a Go function value: the code
// entry point, optionally followed by closure data.
type FuncVal struct {
	FN uintptr
	// variable-size, fn-specific data here
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A Guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type Guintptr uintptr

// Ptr reinterprets the stored word as a *G.
//go:nosplit
func (gp Guintptr) Ptr() *G { return (*G)(unsafe.Pointer(gp)) }

// Set stores g as a bare word, deliberately bypassing write barriers.
//go:nosplit
func (gp *Guintptr) Set(g *G) { *gp = Guintptr(unsafe.Pointer(g)) }

//go:nosplit
//func (gp *guintptr) Cas(old, new guintptr) bool {
//	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
//}

// A Puintptr holds a P pointer as a bare word to bypass write barriers.
type Puintptr uintptr

//go:nosplit
//func (pp Puintptr) ptr() *P { return (*P)(unsafe.Pointer(pp)) }

//go:nosplit
//func (pp *Puintptr) set(p *P) { *pp = Puintptr(unsafe.Pointer(p)) }

// Muintptr is a *M that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constrains on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
// ensure it is not in use when the last true *m is released.
type Muintptr uintptr

// Ptr reinterprets the stored word as a *M.
//go:nosplit
func (mp Muintptr) Ptr() *M { return (*M)(unsafe.Pointer(mp)) }

// Set stores m as a bare word, deliberately bypassing write barriers.
//go:nosplit
func (mp *Muintptr) Set(m *M) { *mp = Muintptr(unsafe.Pointer(m)) }

// Uintreg is a register-sized unsigned integer.
// NOTE(review): fixed at uint64 here while GoBuf uses sys.Uintreg,
// which is platform-dependent — confirm against the targeted runtime.
type Uintreg uint64

// GoBuf holds the register state of a suspended goroutine,
// mirroring runtime.gobuf. Field order is ABI: do not reorder.
type GoBuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    Guintptr
	ctxt unsafe.Pointer
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// Sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// Sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// Sudogs are allocated from a special pool. Use acquiresudog and
// releasesudog to allocate and free them.
type Sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *G

	// isselect indicates g is participating in a select, so
	// g.selectdone must be cas'd to win the wake-up race.
	isselect bool
	next     *Sudog
	prev     *Sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaroot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *Sudog // semaroot binary tree
	waitlink    *Sudog // g.waiting list or semaroot
	waittail    *Sudog // semaroot
	c           *HChan // channel
}

// GCStats accumulates per-M garbage collection statistics.
type GCStats struct {
	// the struct must consist of only uint64's,
	// because it is casted to uint64[].
	nhandoff    uint64
	nhandoffcnt uint64
	nprocyield  uint64
	nosyield    uint64
	nsleep      uint64
}

// LibCall describes a pending call into a C/system library function.
type LibCall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// WinCallBackContext describes how to handle a Windows callback.
type WinCallBackContext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type Stack struct {
	lo uintptr
	hi uintptr
}

// StkBar records the state of a G's stack barrier.
type StkBar struct {
	savedLRPtr uintptr // location overwritten by stack barrier PC
	savedLRVal uintptr // value overwritten at savedLRPtr
}

// Defer mirrors runtime._defer: a deferred subroutine call.
type Defer struct {
	siz     int32
	started bool
	sp      uintptr // sp at time of defer
	pc      uintptr
	fn      *FuncVal
	_panic  *Panic // panic that is running defer
	link    *Defer
}

// Panic mirrors runtime._panic: an in-flight panic.
type Panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *Panic         // link to earlier panic
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
}
360 type Func struct { 361 entry uintptr // start pc 362 nameoff int32 // function name 363 364 args int32 // in/out args size 365 _ int32 // previously legacy frame size; kept for layout compatibility 366 367 pcsp int32 368 pcfile int32 369 pcln int32 370 npcdata int32 371 nfuncdata int32 372 } 373 374 /* 375 * stack traces 376 */ 377 378 type StkFrame struct { 379 fn *Func // function being run 380 pc uintptr // program counter within fn 381 continpc uintptr // program counter where execution can continue, or 0 if not 382 lr uintptr // program counter at caller aka link register 383 sp uintptr // stack pointer at pc 384 fp uintptr // stack pointer at caller aka frame pointer 385 varp uintptr // top of local variables 386 argp uintptr // pointer to function arguments 387 arglen uintptr // number of bytes at argp 388 argmap *BitVector // force use of this argmap 389 } 390 391 // AncestorInfo records details of where a goroutine was started. 392 type AncestorInfo struct { 393 PCS []uintptr // pcs from the stack of this goroutine 394 GoID int64 // goroutine id of this goroutine; original goroutine possibly dead 395 GoPC uintptr // pc of go statement that created this goroutine 396 } 397 398 // Per-thread (in Go, per-P) cache for small objects. 399 // No locking needed because it is per-thread (per-P). 400 // 401 // mcaches are allocated from non-GC'd memory, so any heap pointers 402 // must be specially handled. 403 type MCache struct { 404 // ... 405 } 406 407 type G struct { 408 // Stack parameters. 409 // stack describes the actual stack memory: [stack.lo, stack.hi). 410 // stackguard0 is the stack pointer compared in the Go stack growth prologue. 411 // It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption. 412 // stackguard1 is the stack pointer compared in the C stack growth prologue. 413 // It is stack.lo+StackGuard on g0 and gsignal stacks. 414 // It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash). 
415 Stack Stack // offset known to runtime/cgo 416 StackGuard0 uintptr // offset known to liblink 417 StackGuard1 uintptr // offset known to liblink 418 419 Panic *Panic // innermost panic - offset known to liblink 420 Defer *Defer // innermost defer 421 M *M // current m; offset known to arm liblink 422 Sched GoBuf 423 SysCallSP uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc 424 SysCallPC uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc 425 StkTopSP uintptr // expected sp at top of stack, to check in traceback 426 Param unsafe.Pointer // passed parameter on wakeup 427 AtomicStatus uint32 428 StackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus 429 GoID int64 430 SchedLink Guintptr 431 WaitSince int64 // approx time when the g become blocked 432 WaitReason WaitReason // if status==Gwaiting 433 Preempt bool // preemption signal, duplicates stackguard0 = stackpreempt 434 PanicOnFault bool // panic (instead of crash) on unexpected fault address 435 PreemptScan bool // preempted g does scan for gc 436 GcScanDone bool // g has scanned stack; protected by _Gscan bit in status 437 GcScanValid bool // false at start of gc cycle, true if G has not run since last scan; TODO: remove? 
438 ThrowSplit bool // must not split stack 439 RaceIgnore int8 // ignore race detection events 440 SysBlockTraced bool // StartTrace has emitted EvGoInSyscall about this goroutine 441 SysExitTicks int64 // cputicks when syscall has returned (for tracing) 442 TraceSeq uint64 // trace event sequencer 443 TraceLastP Puintptr // last P emitted an event for this goroutine 444 LockedM Muintptr 445 Sig uint32 446 WriteBuf []byte 447 SigCode0 uintptr 448 SigCode1 uintptr 449 SigPC uintptr 450 GoPC uintptr // pc of go statement that created this goroutine 451 Ancestors *[]AncestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors) 452 StartPC uintptr // pc of goroutine function 453 RaceCtx uintptr 454 Waiting *Sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order 455 CgoCtxt []uintptr // cgo traceback context 456 Labels unsafe.Pointer // profiler labels 457 Timer *Timer // cached timer for time.Sleep 458 SelectDone uint32 // are we participating in a select and did someone win the race? 459 460 // Per-G GC state 461 462 // gcAssistBytes is this G's GC assist credit in terms of 463 // bytes allocated. If this is positive, then the G has credit 464 // to allocate gcAssistBytes bytes without assisting. If this 465 // is negative, then the G must correct this by performing 466 // scan work. We track this in bytes to make it fast to update 467 // and check for debt in the malloc hot path. The assist ratio 468 // determines how this corresponds to scan work debt. 469 GcAssistBytes int64 470 } 471 472 type M struct { 473 G0 *G // goroutine with scheduling stack 474 MoreBuf GoBuf // gobuf arg to morestack 475 DivMod uint32 // div/mod denominator for arm - known to liblink 476 477 // Fields not known to debuggers. 
478 ProcID uint64 // for debuggers, but offset not hard-coded 479 GSignal *G // signal-handling g 480 GoSigStack GSignalStack // Go-allocated signal handling stack 481 SigMask SigSet // storage for saved signal mask 482 TLS [6]uintptr // thread-local storage (for x86 extern register) 483 MStartFn func() 484 CurG *G // current running goroutine 485 CaughtSig Guintptr // goroutine running during fatal signal 486 P Puintptr // attached p for executing go code (nil if not executing go code) 487 NextP Puintptr 488 OldP Puintptr // the p that was attached before executing a syscall 489 ID int64 490 MAllocing int32 491 Throwing int32 492 PreemptOff string // if != "", keep curg running on this m 493 Locks int32 494 Dying int32 495 ProfileHz int32 496 Spinning bool // m is out of work and is actively looking for work 497 Blocked bool // m is blocked on a note 498 InWb bool // m is executing a write barrier 499 NewSigStack bool // minit on C thread called sigaltstack 500 PrintLock int8 501 IncGo bool // m is executing a cgo call 502 FreeWait uint32 // if == 0, safe to free g0 and delete m (atomic) 503 FastRand [2]uint32 504 NeedextRam bool 505 TraceBack uint8 506 NCgoCall uint64 // number of cgo calls in total 507 NCgo int32 // number of cgo calls currently in progress 508 CgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily 509 CgoCallers *CgoCallers // cgo traceback if crashing in cgo call 510 Park Note 511 AllLink *M // on allm 512 SchedLink Muintptr 513 MCache *MCache 514 LockedG Guintptr 515 CreateStack [32]uintptr // stack that created this thread. 
516 LockedExt uint32 // tracking for external LockOSThread 517 LockedInt uint32 // tracking for internal lockOSThread 518 NextWaitM Muintptr // next m waiting for lock 519 WaitUnlockF unsafe.Pointer // todo go func(*g, unsafe.pointer) bool 520 WaitLock unsafe.Pointer 521 WaitTraceEv byte 522 WaitTraceSkip int 523 StartingTrace bool 524 SysCallTick uint32 525 Thread uintptr // thread handle 526 FreeLink *M // on sched.freem 527 528 // these are here because they are too large to be on the stack 529 // of low-level NOSPLIT functions. 530 LibCall LibCall 531 LibCallPC uintptr // for cpu profiler 532 LibCallSP uintptr 533 LibCallG Guintptr 534 Syscall LibCall // stores syscall parameters on windows 535 536 VDSOSP uintptr // SP for traceback while in VDSO call (0 if not in call) 537 VDSOPC uintptr // PC for traceback while in VDSO call 538 539 MOS 540 } 541 542 // A waitReason explains why a goroutine has been stopped. 543 // See gopark. Do not re-use waitReasons, add new ones. 544 type WaitReason uint8 545 546 const ( 547 WaitReasonZero WaitReason = iota // "" 548 WaitReasonGCAssistMarking // "GC assist marking" 549 WaitReasonIOWait // "IO wait" 550 WaitReasonChanReceiveNilChan // "chan receive (nil chan)" 551 WaitReasonChanSendNilChan // "chan send (nil chan)" 552 WaitReasonDumpingHeap // "dumping heap" 553 WaitReasonGarbageCollection // "garbage collection" 554 WaitReasonGarbageCollectionScan // "garbage collection scan" 555 WaitReasonPanicWait // "panicwait" 556 WaitReasonSelect // "select" 557 WaitReasonSelectNoCases // "select (no cases)" 558 WaitReasonGCAssistWait // "GC assist wait" 559 WaitReasonGCSweepWait // "GC sweep wait" 560 WaitReasonChanReceive // "chan receive" 561 WaitReasonChanSend // "chan send" 562 WaitReasonFinalizerWait // "finalizer wait" 563 WaitReasonForceGGIdle // "force gc (idle)" 564 WaitReasonSemacquire // "semacquire" 565 WaitReasonSleep // "sleep" 566 WaitReasonSyncCondWait // "sync.Cond.Wait" 567 WaitReasonTimerGoroutineIdle // "timer 
goroutine (idle)" 568 WaitReasonTraceReaderBlocked // "trace reader (blocked)" 569 WaitReasonWaitForGCCycle // "wait for GC cycle" 570 WaitReasonGCWorkerIdle // "GC worker (idle)" 571 ) 572 573 var WaitReasonStrings = [...]string{ 574 WaitReasonZero: "", 575 WaitReasonGCAssistMarking: "GC assist marking", 576 WaitReasonIOWait: "IO wait", 577 WaitReasonChanReceiveNilChan: "chan receive (nil chan)", 578 WaitReasonChanSendNilChan: "chan send (nil chan)", 579 WaitReasonDumpingHeap: "dumping heap", 580 WaitReasonGarbageCollection: "garbage collection", 581 WaitReasonGarbageCollectionScan: "garbage collection scan", 582 WaitReasonPanicWait: "panicwait", 583 WaitReasonSelect: "select", 584 WaitReasonSelectNoCases: "select (no cases)", 585 WaitReasonGCAssistWait: "GC assist wait", 586 WaitReasonGCSweepWait: "GC sweep wait", 587 WaitReasonChanReceive: "chan receive", 588 WaitReasonChanSend: "chan send", 589 WaitReasonFinalizerWait: "finalizer wait", 590 WaitReasonForceGGIdle: "force gc (idle)", 591 WaitReasonSemacquire: "semacquire", 592 WaitReasonSleep: "sleep", 593 WaitReasonSyncCondWait: "sync.Cond.Wait", 594 WaitReasonTimerGoroutineIdle: "timer goroutine (idle)", 595 WaitReasonTraceReaderBlocked: "trace reader (blocked)", 596 WaitReasonWaitForGCCycle: "wait for GC cycle", 597 WaitReasonGCWorkerIdle: "GC worker (idle)", 598 } 599 600 func (w WaitReason) String() string { 601 if w < 0 || w >= WaitReason(len(WaitReasonStrings)) { 602 return "unknown wait reason" 603 } 604 return WaitReasonStrings[w] 605 }