// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"github.com/x04/go/src/internal/cpu"
	"github.com/x04/go/src/runtime/internal/atomic"
	"github.com/x04/go/src/runtime/internal/sys"
	"github.com/x04/go/src/unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P (g.m and g.m.p are valid).
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack.
	// The goroutine is not executing user code and the stack is
	// owned by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
)
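// An illustrative sketch, not part of this file: per the comment above,
// masking off _Gscan recovers the state the goroutine will return to
// when the scan completes. readgstatus is defined in proc.go; gp here
// is a hypothetical *g.
//
//	status := readgstatus(gp)
//	if status&_Gscan != 0 {
//		base := status &^ _Gscan // e.g. _Gscanwaiting yields _Gwaiting
//		_ = base
//	}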
const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	// halt for the GC). The M may also hand ownership of the P
	// off directly to another M (e.g., to schedule a locked G).
	_Prunning

	// _Psyscall means a P is not running user code. It has
	// affinity to an M in a syscall but is not owned by it and
	// may be stolen by another M. This is similar to _Pidle but
	// uses lightweight transitions and maintains M affinity.
	//
	// Leaving _Psyscall must be done with a CAS, either to steal
	// or retake the P. Note that there's an ABA hazard: even if
	// an M successfully CASes its original P back to _Prunning
	// after a syscall, it must understand the P may have been
	// used by another M in the interim.
	_Psyscall

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return; future notesleep calls will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}
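// An illustrative sketch, not part of this file: efaceOf gives access
// to the type and data words of an empty interface without reflection.
// The variables here are hypothetical.
//
//	var x interface{} = 42
//	e := efaceOf(&x)
//	// e._type points at the type descriptor for int,
//	// e.data at the boxed value.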
// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
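// An illustrative sketch, not part of this file: a typical use of the
// accessors above when linking a G onto a list without write barriers.
// head and gp are hypothetical; compare the midle/schedlink handling
// in proc.go.
//
//	var head guintptr
//	for {
//		old := head
//		gp.schedlink = old
//		if head.cas(old, guintptr(unsafe.Pointer(gp))) {
//			break
//		}
//	}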
type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}
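// An illustrative sketch, not part of this file: with the [lo, hi)
// convention above, size and membership checks are simple arithmetic.
// The helper names are hypothetical.
//
//	func stackSize(s stack) uintptr { return s.hi - s.lo }
//
//	func onStack(s stack, sp uintptr) bool { return s.lo <= sp && sp < s.hi }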
type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic       *_panic // innermost panic - offset known to liblink
	_defer       *_defer // innermost defer
	m            *m      // current m; offset known to arm liblink
	sched        gobuf
	syscallsp    uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc    uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp     uintptr        // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         int64
	schedlink    guintptr
	waitsince    int64      // approx time when the g became blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool

	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr         // pc of go statement that created this goroutine
	ancestors      *[]ancestorInfo // ancestor information of goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc        uintptr         // pc of goroutine function
	racectx        uintptr
	waiting        *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr      // cgo traceback context
	labels         unsafe.Pointer // profiler labels
	timer          *timer         // cached timer for time.Sleep
	selectDone     uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64
}
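// An illustrative sketch, not part of this file: what the
// compiler-emitted stack-growth prologue effectively checks against
// stackguard0 (pseudocode; the real check is per-arch assembly, and
// setting stackguard0 to stackPreempt makes it always fire).
//
//	if sp < getg().stackguard0 { // sp: the machine stack pointer
//		morestack()          // grow the stack, or park for preemption
//	}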
type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64       // for debuggers, but offset not hard-coded
	gsignal       *g           // signal-handling g
	goSigStack    gsignalStack // Go-allocated signal handling stack
	sigmask       sigset       // storage for saved signal mask
	tls           [6]uintptr   // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	oldp          puintptr // the p that was attached before executing a syscall
	id            int64
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	dying         int32
	profilehz     int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	incgo         bool   // m is executing a cgo call
	freeWait      uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand      [2]uint32
	needextram    bool
	traceback     uint8
	ncgocall      uint64      // number of cgo calls in total
	ncgo          int32       // number of cgo calls currently in progress
	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	mcache        *mcache
	lockedg       guintptr
	createstack   [32]uintptr // stack that created this thread.
	lockedExt     uint32      // tracking for external LockOSThread
	lockedInt     uint32      // tracking for internal lockOSThread
	nextwaitm     muintptr    // next m waiting for lock
	waitunlockf   func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	freelink      *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails. Accessed atomically.
	preemptGen uint32

	dlogPerM

	mOS
}
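// An illustrative sketch, not part of this file: runtime code pins
// itself to the current M through m.locks (cf. acquirem/releasem in
// proc.go), which also suppresses preemption.
//
//	mp := acquirem() // mp.locks++
//	// ... work that must stay on this M ...
//	releasem(mp)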
type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree struct {
		gList
		n int32
	}

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	_ uint32 // Alignment for atomic fields below

	// The when field of the first entry on the timer heap.
	// This is updated using atomic functions.
	// This is 0 if the timer heap is empty.
	timer0When uint64

	// Per-P GC state
	gcAssistTime         int64    // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64    // Nanoseconds in fractional mark worker (atomic)
	gcBgMarkWorker       guintptr // (atomic)
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// Lock for timers. We normally access the timers while running
	// on this P, but the scheduler can also do it from a different P.
	timersLock mutex

	// Actions to take at some time. This is used to implement the
	// standard library's time package.
	// Must hold timersLock to access.
	timers []*timer

	// Number of timers in P's heap.
	// Modified using atomic instructions.
	numTimers uint32

	// Number of timerModifiedEarlier timers on P's heap.
	// This should only be modified while holding timersLock,
	// or while the timer status is in a transient state
	// such as timerModifying.
	adjustTimers uint32

	// Number of timerDeleted timers in P's heap.
	// Modified using atomic instructions.
	deletedTimers uint32

	// Race context used while executing timer functions.
	timerRaceCtx uintptr

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	pad cpu.CacheLinePad
}
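// An illustrative sketch, not part of this file: how runnext takes
// priority over the ring buffer when the scheduler picks the next G
// (simplified from runqget in proc.go).
//
//	if next := pp.runnext; next != 0 {
//		if pp.runnext.cas(next, 0) {
//			return next.ptr() // run this G before anything in runq
//		}
//	}
//	// otherwise pop from pp.runq[pp.runqhead%uint32(len(pp.runq))]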
type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen   uint64
	lastpoll  uint64 // time of last network poll, 0 if currently polling
	pollUntil uint64 // time to which current poll is sleeping

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runq     gQueue
	runqsize int32

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
		n       int32
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safePointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)
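// An illustrative sketch, not part of this file: the flags above are
// OR-combined per signal in the platform sigtable (cf.
// sigtab_linux_generic.go); the entry shown is representative.
//
//	/* SIGINT */ {_SigNotify + _SigKill, "SIGINT: interrupt"},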
// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	funcID    funcID  // set for certain special runtime functions
	_         [2]int8 // unused
	nfuncdata uint8   // must be last
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
type funcinl struct {
	zero  uintptr // set to 0 to distinguish from _func
	entry uintptr // entry of the real (the "outermost") frame.
	name  string
	file  string
	line  int
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptabs.
type itab struct {
	inter *interfacetype
	_type *_type
	hash  uint32 // copy of _type.hash. Used for type switches.
	_     [4]byte
	fun   [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
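// An illustrative sketch, not part of this file: how a platform init
// path stretches the startup entropy across a larger buffer (cf.
// getRandomData in os_linux.go); the buffer size is hypothetical.
//
//	var seed [32]byte
//	n := copy(seed[:], startupRandomData)
//	extendRandom(seed[:], n) // fill the remainder from r[:n]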
// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer and deferProcStack.
// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
// and cmd/compile/internal/gc/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	siz     int32 // includes both arguments and results
	started bool
	heap    bool
	// openDefer indicates that this _defer is for a frame with open-coded
	// defers. We have only one defer record for the entire frame (which may
	// currently have 0, 1, or more defers active).
	openDefer bool
	sp        uintptr  // sp at time of defer
	pc        uintptr  // pc at time of defer
	fn        *funcval // can be nil for open-coded defers
	_panic    *_panic  // panic that is running defer
	link      *_defer

	// If openDefer is true, the fields below record values about the stack
	// frame and associated function that has the open-coded defer(s). sp
	// above will be the sp for the frame, and pc will be the address of the
	// deferreturn call in the function.
	fd   unsafe.Pointer // funcdata for the function associated with the frame
	varp uintptr        // value of varp for the stack frame
	// framepc is the current pc associated with the stack frame. Together
	// with sp above (which is the sp associated with the stack frame),
	// framepc/sp can be used as a pc/sp pair to continue a stack trace via
	// gentraceback().
	framepc uintptr
}

// A _panic holds information about an active panic.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
//
//go:notinheap
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	pc        uintptr        // where to return to in runtime if this panic is bypassed
	sp        unsafe.Pointer // where to return to in runtime if this panic is bypassed
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
	goexit    bool
}

// stack traces
type stkframe struct {
	fn       funcInfo   // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid int64     // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100
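// An illustrative note, not part of this file, on the _defer record
// above: each executed defer statement in user code, e.g.
//
//	defer f(x)
//
// pushes a _defer recording (siz, sp, pc, fn) onto g._defer, unless
// the frame uses open-coded defers (openDefer), in which case a single
// record with fd/varp/framepc describes the whole frame.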
// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonSelect                                  // "select"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGGIdle                             // "force gc (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonTimerGoroutineIdle                      // "timer goroutine (idle)"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonPreempted                               // "preempted"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGGIdle:           "force gc (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonTimerGoroutineIdle:    "timer goroutine (idle)",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonPreempted:             "preempted",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}

var (
	allglen    uintptr
	allm       *m
	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
	allpLock   mutex // Protects P-less reads of allp and all writes
	gomaxprocs int32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
	lfenceBeforeRdtsc    bool

	goarm                uint8 // set by cmd/link on arm systems
	framepointer_enabled bool  // set by cmd/link
)
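// An illustrative sketch, not part of this file: gopark callers pass
// one of the reasons above, and it surfaces in goroutine dumps through
// String.
//
//	reason := waitReasonChanReceive
//	print("goroutine blocked: ", reason.String(), "\n") // "chan receive"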
// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)