// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// NOTE: these numeric values are hardcoded in gdb scripts;
	// do not renumber.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P (g.m and g.m.p are valid).
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gexitingsyscall means this goroutine is exiting from a
	// system call. This is like _Gsyscall, but the GC should not
	// scan its stack. Currently this is only used in exitsyscall0
	// as a transient state when it drops the G.
	_Gexitingsyscall // 10

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
)

const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	// halt for the GC). The M may also hand ownership of the P
	// off directly to another M (e.g., to schedule a locked G).
	_Prunning

	// _Psyscall means a P is not running user code. It has
	// affinity to an M in a syscall but is not owned by it and
	// may be stolen by another M. This is similar to _Pidle but
	// uses lightweight transitions and maintains M affinity.
	//
	// Leaving _Psyscall must be done with a CAS, either to steal
	// or retake the P. Note that there's an ABA hazard: even if
	// an M successfully CASes its original P back to _Prunning
	// after a syscall, it must understand the P may have been
	// used by another M in the interim.
	_Psyscall

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// funcval is the runtime representation of a Go function value:
// a code pointer followed by closure data.
type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

// The representation of a non-empty interface.
// See comment in iface.go for more details on this struct.
type iface struct {
	tab  unsafe.Pointer
	data unsafe.Pointer
}

// The representation of an empty interface.
// See comment in iface.go for more details on this struct.
type eface struct {
	_type *_type
	data  unsafe.Pointer
}

// efaceOf reinterprets a *interface{} as its runtime representation.
// No data is copied; the result aliases *ep.
func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

// ptr converts the untyped word back to a *g.
//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

// set stores g without a write barrier (plain uintptr store).
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

// cas atomically compare-and-swaps the stored goroutine word.
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

// puintptr is a *p typed as uintptr to bypass write barriers;
// see the guintptr commentary above for the rationale.
type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many.
// A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}

/*
Not used by gccgo.

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

*/

/*
Not used by gccgo.

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}
*/

/*
Not used by gccgo.

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
*/
/*
Not used by gccgo.

type stack struct {
	lo uintptr
	hi uintptr
}
*/

// g holds the per-goroutine state. Field order matters: some offsets
// are known to liblink and to assembly code, as noted per field.
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	// Not for gccgo: stack stack // offset known to runtime/cgo
	// Not for gccgo: stackguard0 uintptr // offset known to liblink
	// Not for gccgo: stackguard1 uintptr // offset known to liblink

	_panic *_panic // innermost panic - offset known to liblink
	_defer *_defer // innermost defer
	m      *m      // current m; offset known to arm liblink
	// Not for gccgo: sched gobuf
	syscallsp uintptr // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc uintptr // if status==Gsyscall, syscallpc = sched.pc to use during gc
	// Not for gccgo: stktopsp uintptr // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32         // one of the _G* status values above; accessed atomically
	// Not for gccgo: stackLock uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid        int64
	schedlink   guintptr
	waitsince   int64      // approx time when the g became blocked
	waitreason  waitReason // if status==Gwaiting
	preempt     bool       // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop bool       // transition to _Gpreempted on preemption; otherwise, just deschedule
	// Not for gccgo: preemptShrink bool // shrink stack at synchronous safe point
	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	preemptscan  bool // preempted g does scan for gc
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack

	gcScannedSyscallStack bool // gccgo specific; see scanSyscallStack

	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool

	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr         // pc of go statement that created this goroutine
	ancestors      *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc        uintptr         // pc of goroutine function
	// Not for gccgo: racectx uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: cgoCtxt []uintptr // cgo traceback context
	labels     unsafe.Pointer // profiler labels
	timer      *timer         // cached timer for time.Sleep
	selectDone uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	// Remaining fields are specific to gccgo.

	exception unsafe.Pointer // current exception being thrown
	isforeign bool           // whether current exception is not from Go

	// When using split-stacks, these fields holds the results of
	// __splitstack_find while executing a syscall. These are used
	// by the garbage collector to scan the goroutine's stack.
	//
	// When not using split-stacks, g0 stacks are allocated by the
	// libc and other goroutine stacks are allocated by malg.
	// gcstack: unused (sometimes cleared)
	// gcstacksize: g0: 0; others: size of stack
	// gcnextsegment: unused
	// gcnextsp: current SP while executing a syscall
	// gcinitialsp: g0: top of stack; others: start of stack memory
	// gcnextsp2: current secondary stack pointer (if present)
	// gcinitialsp2: start of secondary stack (if present)
	gcstack       uintptr
	gcstacksize   uintptr
	gcnextsegment uintptr
	gcnextsp      uintptr
	gcinitialsp   unsafe.Pointer
	gcnextsp2     uintptr
	gcinitialsp2  unsafe.Pointer

	// gcregs holds the register values while executing a syscall.
	// This is set by getcontext and scanned by the garbage collector.
	gcregs g_ucontext_t

	entry    func(unsafe.Pointer) // goroutine function to run
	entryfn  uintptr              // function address passed to __go_go
	entrysp  uintptr              // the stack pointer of the outermost Go frame
	fromgogo bool                 // whether entered from gogo function

	scanningself bool // whether goroutine is scanning its own stack

	scang   uintptr // the g that wants to scan this g's stack (uintptr to avoid write barrier)
	scangcw uintptr // gc worker for scanning stack (uintptr to avoid write barrier)

	isSystemGoroutine    bool // whether goroutine is a "system" goroutine
	isFinalizerGoroutine bool // whether goroutine is the finalizer goroutine

	deferring          bool // whether we are running a deferred function
	goexiting          bool // whether we are running Goexit
	ranCgocallBackDone bool // whether we deferred CgocallBackDone

	traceback uintptr // stack traceback buffer

	context      g_ucontext_t // saved context for setcontext
	stackcontext [10]uintptr  // split-stack context
}

// m holds the per-OS-thread state.
type m struct {
	g0 *g // goroutine with scheduling stack
	// Not for gccgo: morebuf gobuf // gobuf arg to morestack
	// Not for gccgo: divmod uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid  uint64 // for debuggers, but offset not hard-coded
	gsignal *g     // signal-handling g
	// Not for gccgo: goSigStack gsignalStack // Go-allocated signal handling stack
	sigmask sigset // storage for saved signal mask
	// Not for gccgo: tls [6]uintptr // thread-local storage (for x86 extern register)
	mstartfn    func()
	curg        *g       // current running goroutine
	caughtsig   guintptr // goroutine running during fatal signal
	p           puintptr // attached p for executing go code (nil if not executing go code)
	nextp       puintptr
	oldp        puintptr // the p that was attached before executing a syscall
	id          int64
	mallocing   int32
	throwing    int32
	preemptoff  string // if != "", keep curg running on this m
	locks       int32
	softfloat   int32
	dying       int32
	profilehz   int32
	spinning    bool // m is out of work and is actively looking for work
	blocked     bool // m is blocked on a note
	newSigstack bool // minit on C thread called sigaltstack
	printlock   int8
	incgo       bool   // m is executing a cgo call
	freeWait    uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand    [2]uint32
	needextram  bool
	traceback   uint8
	ncgocall    uint64 // number of cgo calls in total
	ncgo        int32  // number of cgo calls currently in progress
	// Not for gccgo: cgoCallersUse uint32 // if non-zero, cgoCallers in use temporarily
	// Not for gccgo: cgoCallers *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	mcache        *mcache
	lockedg       guintptr
	createstack   [32]location // stack that created this thread.
	lockedExt     uint32       // tracking for external LockOSThread
	lockedInt     uint32       // tracking for internal lockOSThread
	nextwaitm     muintptr     // next m waiting for lock
	waitunlockf   func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	freelink      *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	// Not for gccgo: libcall libcall
	// Not for gccgo: libcallpc uintptr // for cpu profiler
	// Not for gccgo: libcallsp uintptr
	// Not for gccgo: libcallg guintptr
	// Not for gccgo: syscall libcall // stores syscall parameters on windows

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails. Accessed atomically.
	preemptGen uint32

	dlogPerM

	mOS

	// Remaining fields are specific to gccgo.

	gsignalstack     unsafe.Pointer // stack for gsignal
	gsignalstacksize uintptr

	dropextram bool // drop after call is done
	exiting    bool // thread is exiting

	scannote note // synchronization for signal-based stack scanning
}

// p holds the per-processor scheduling state (resources required to
// run goroutines).
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	// gccgo has only one size of defer.
	deferpool    []*_defer
	deferpoolbuf [32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree struct {
		gList
		n int32
	}

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, moreso than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	_ uint32 // Alignment for atomic fields below

	// The when field of the first entry on the timer heap.
	// This is updated using atomic functions.
	// This is 0 if the timer heap is empty.
	timer0When uint64

	// Per-P GC state
	gcAssistTime         int64    // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64    // Nanoseconds in fractional mark worker (atomic)
	gcBgMarkWorker       guintptr // (atomic)
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// Lock for timers. We normally access the timers while running
	// on this P, but the scheduler can also do it from a different P.
	timersLock mutex

	// Actions to take at some time. This is used to implement the
	// standard library's time package.
	// Must hold timersLock to access.
	timers []*timer

	// Number of timers in P's heap.
	// Modified using atomic instructions.
	numTimers uint32

	// Number of timerModifiedEarlier timers on P's heap.
	// This should only be modified while holding timersLock,
	// or while the timer status is in a transient state
	// such as timerModifying.
	adjustTimers uint32

	// Number of timerDeleted timers in P's heap.
	// Modified using atomic instructions.
	deletedTimers uint32

	// Race context used while executing timer functions.
	// Not for gccgo: timerRaceCtx uintptr

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	pad cpu.CacheLinePad
}

// schedt holds global scheduler state.
type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen   uint64
	lastpoll  uint64 // time of last network poll, 0 if currently polling
	pollUntil uint64 // time to which current poll is sleeping

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runq     gQueue
	runqsize int32

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	}

	// Global cache of dead G's.
	gFree struct {
		lock mutex
		list gList // Gs
		n    int32
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs.
	deferlock mutex
	deferpool *_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

// forcegcstate tracks the goroutine that forces periodic GCs.
type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed.
		// Hash at most the last 16 already-random bytes.
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		// Spread the hash word across up to PtrSize output bytes.
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer.
// This struct must match the code in Defer_statement::defer_struct_type
// in the compiler.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	// The next entry in the stack.
	link *_defer

	// The stack variable for the function which called this defer
	// statement. This is set to true if we are returning from
	// that function, false if we are panicking through it.
	frame *bool

	// The value of the panic stack when this function is
	// deferred. This function can not recover this value from
	// the panic stack. This can happen if a deferred function
	// has a defer statement itself.
	panicStack *_panic

	// The panic that caused the defer to run. This is used to
	// discard panics that have already been handled.
	_panic *_panic

	// The function to call.
	pfn uintptr

	// The argument to pass to the function.
	arg unsafe.Pointer

	// The return address that a recover thunk matches against.
	// This is set by __go_set_defer_retaddr which is called by
	// the thunks created by defer statements.
	retaddr uintptr

	// Set to true if a function created by reflect.MakeFunc is
	// permitted to recover. The return address of such a
	// function will be somewhere in libffi, so __retaddr
	// is not useful.
	makefunccanrecover bool

	// Whether the _defer is heap allocated.
	heap bool
}

// panics
// This is the gccgo version.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
//go:notinheap
type _panic struct {
	// The next entry in the stack.
	link *_panic

	// The value associated with this panic.
	arg interface{}

	// Whether this panic has been recovered.
	recovered bool

	// Whether this panic was pushed on the stack because of an
	// exception thrown in some other language.
	isforeign bool

	// Whether this panic was already seen by a deferred function
	// which called panic again.
	aborted bool

	// Whether this panic was created for goexit.
	goexit bool
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid int64     // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

// Flags controlling what gets included in a traceback.
const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback.
const _TracebackMaxFrames = 100

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonSelect                                  // "select"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGGIdle                             // "force gc (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonTimerGoroutineIdle                      // "timer goroutine (idle)"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonPreempted                               // "preempted"
)

// waitReasonStrings maps each waitReason value (used as the array
// index) to its human-readable string.
var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
waitReasonChanReceive: "chan receive", 1011 waitReasonChanSend: "chan send", 1012 waitReasonFinalizerWait: "finalizer wait", 1013 waitReasonForceGGIdle: "force gc (idle)", 1014 waitReasonSemacquire: "semacquire", 1015 waitReasonSleep: "sleep", 1016 waitReasonSyncCondWait: "sync.Cond.Wait", 1017 waitReasonTimerGoroutineIdle: "timer goroutine (idle)", 1018 waitReasonTraceReaderBlocked: "trace reader (blocked)", 1019 waitReasonWaitForGCCycle: "wait for GC cycle", 1020 waitReasonGCWorkerIdle: "GC worker (idle)", 1021 waitReasonPreempted: "preempted", 1022 } 1023 1024 func (w waitReason) String() string { 1025 if w < 0 || w >= waitReason(len(waitReasonStrings)) { 1026 return "unknown wait reason" 1027 } 1028 return waitReasonStrings[w] 1029 } 1030 1031 var ( 1032 allglen uintptr 1033 allm *m 1034 allp []*p // len(allp) == gomaxprocs; may change at safe points, otherwise immutable 1035 allpLock mutex // Protects P-less reads of allp and all writes 1036 gomaxprocs int32 1037 ncpu int32 1038 forcegc forcegcstate 1039 sched schedt 1040 newprocs int32 1041 1042 support_aes bool 1043 ) 1044 1045 // Set by the linker so the runtime can determine the buildmode. 1046 var ( 1047 islibrary bool // -buildmode=c-shared 1048 isarchive bool // -buildmode=c-archive 1049 ) 1050 1051 // Types that are only used by gccgo. 1052 1053 // g_ucontext_t is a Go version of the C ucontext_t type, used by getcontext. 1054 // _sizeof_ucontext_t is defined by mkrsysinfo.sh from <ucontext.h>. 1055 // On some systems getcontext and friends require a value that is 1056 // aligned to a 16-byte boundary. We implement this by increasing the 1057 // required size and picking an appropriate offset when we use the 1058 // array. 1059 type g_ucontext_t [(_sizeof_ucontext_t + 15) / unsafe.Sizeof(uintptr(0))]uintptr 1060 1061 // sigset is the Go version of the C type sigset_t. 1062 // _sigset_t is defined by the Makefile from <signal.h>. 
1063 type sigset _sigset_t 1064 1065 // getMemstats returns a pointer to the internal memstats variable, 1066 // for C code. 1067 //go:linkname getMemstats 1068 func getMemstats() *mstats { 1069 return &memstats 1070 }