// qoobing.com/gomod/log@v1.2.8/logid-runtime-patch/unknown/runtime2.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// defined constants
const (
	// G status
	//
	// Beyond indicating the general state of a G, the G status
	// acts like a lock on the goroutine's stack (and hence its
	// ability to execute user code).
	//
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgcmark.go too.
	//
	// TODO(austin): The _Gscan bit could be much lighter-weight.
	// For example, we could choose not to run _Gscanrunnable
	// goroutines found in the run queue, rather than CAS-looping
	// until they become _Grunnable. And transitions like
	// _Gscanwaiting -> _Gscanrunnable are actually okay because
	// they don't affect stack ownership.

	// _Gidle means this goroutine was just allocated and has not
	// yet been initialized.
	_Gidle = iota // 0

	// _Grunnable means this goroutine is on a run queue. It is
	// not currently executing user code. The stack is not owned.
	_Grunnable // 1

	// _Grunning means this goroutine may execute user code. The
	// stack is owned by this goroutine. It is not on a run queue.
	// It is assigned an M and a P (g.m and g.m.p are valid).
	_Grunning // 2

	// _Gsyscall means this goroutine is executing a system call.
	// It is not executing user code. The stack is owned by this
	// goroutine. It is not on a run queue. It is assigned an M.
	_Gsyscall // 3

	// _Gwaiting means this goroutine is blocked in the runtime.
	// It is not executing user code. It is not on a run queue,
	// but should be recorded somewhere (e.g., a channel wait
	// queue) so it can be ready()d when necessary. The stack is
	// not owned *except* that a channel operation may read or
	// write parts of the stack under the appropriate channel
	// lock. Otherwise, it is not safe to access the stack after a
	// goroutine enters _Gwaiting (e.g., it may get moved).
	_Gwaiting // 4

	// _Gmoribund_unused is currently unused, but hardcoded in gdb
	// scripts.
	_Gmoribund_unused // 5

	// _Gdead means this goroutine is currently unused. It may be
	// just exited, on a free list, or just being initialized. It
	// is not executing user code. It may or may not have a stack
	// allocated. The G and its stack (if any) are owned by the M
	// that is exiting the G or that obtained the G from the free
	// list.
	_Gdead // 6

	// _Genqueue_unused is currently unused.
	_Genqueue_unused // 7

	// _Gcopystack means this goroutine's stack is being moved. It
	// is not executing user code and is not on a run queue. The
	// stack is owned by the goroutine that put it in _Gcopystack.
	_Gcopystack // 8

	// _Gpreempted means this goroutine stopped itself for a
	// suspendG preemption. It is like _Gwaiting, but nothing is
	// yet responsible for ready()ing it. Some suspendG must CAS
	// the status to _Gwaiting to take responsibility for
	// ready()ing this G.
	_Gpreempted // 9

	// _Gscan combined with one of the above states other than
	// _Grunning indicates that GC is scanning the stack. The
	// goroutine is not executing user code and the stack is owned
	// by the goroutine that set the _Gscan bit.
	//
	// _Gscanrunning is different: it is used to briefly block
	// state transitions while GC signals the G to scan its own
	// stack. This is otherwise like _Grunning.
	//
	// atomicstatus&~Gscan gives the state the goroutine will
	// return to when the scan completes.
	_Gscan          = 0x1000
	_Gscanrunnable  = _Gscan + _Grunnable  // 0x1001
	_Gscanrunning   = _Gscan + _Grunning   // 0x1002
	_Gscansyscall   = _Gscan + _Gsyscall   // 0x1003
	_Gscanwaiting   = _Gscan + _Gwaiting   // 0x1004
	_Gscanpreempted = _Gscan + _Gpreempted // 0x1009
)
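
// Editorial sketch, not part of the original file: a minimal illustration
// of how the G status doubles as a lock on the goroutine's stack.
// Transitions are CAS-based, and a goroutine whose status carries the
// _Gscan bit cannot be transitioned until the scanner clears the bit.
// casgstatusSketch is a hypothetical, simplified stand-in for the real
// casgstatus in proc.go.
func casgstatusSketch(gp *g, oldval, newval uint32) {
	// Loop until the CAS succeeds. While a scanner holds the stack, the
	// status is oldval|_Gscan, so the CAS fails and we spin until the
	// scan completes.
	for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
	}
}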

const (
	// P status

	// _Pidle means a P is not being used to run user code or the
	// scheduler. Typically, it's on the idle P list and available
	// to the scheduler, but it may just be transitioning between
	// other states.
	//
	// The P is owned by the idle list or by whatever is
	// transitioning its state. Its run queue is empty.
	_Pidle = iota

	// _Prunning means a P is owned by an M and is being used to
	// run user code or the scheduler. Only the M that owns this P
	// is allowed to change the P's status from _Prunning. The M
	// may transition the P to _Pidle (if it has no more work to
	// do), _Psyscall (when entering a syscall), or _Pgcstop (to
	// halt for the GC). The M may also hand ownership of the P
	// off directly to another M (e.g., to schedule a locked G).
	_Prunning

	// _Psyscall means a P is not running user code. It has
	// affinity to an M in a syscall but is not owned by it and
	// may be stolen by another M. This is similar to _Pidle but
	// uses lightweight transitions and maintains M affinity.
	//
	// Leaving _Psyscall must be done with a CAS, either to steal
	// or retake the P. Note that there's an ABA hazard: even if
	// an M successfully CASes its original P back to _Prunning
	// after a syscall, it must understand the P may have been
	// used by another M in the interim.
	_Psyscall

	// _Pgcstop means a P is halted for STW and owned by the M
	// that stopped the world. The M that stopped the world
	// continues to use its P, even in _Pgcstop. Transitioning
	// from _Prunning to _Pgcstop causes an M to release its P and
	// park.
	//
	// The P retains its run queue and startTheWorld will restart
	// the scheduler on Ps with non-empty run queues.
	_Pgcstop

	// _Pdead means a P is no longer used (GOMAXPROCS shrank). We
	// reuse Ps if GOMAXPROCS increases. A dead P is mostly
	// stripped of its resources, though a few things remain
	// (e.g., trace buffers).
	_Pdead
)

// Mutual exclusion locks. In the uncontended case,
// as fast as spin locks (just a few user-level instructions),
// but on the contention path they sleep in the kernel.
// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}

// sleep and wakeup on one-time events.
// before any calls to notesleep or notewakeup,
// must call noteclear to initialize the Note.
// then, exactly one thread can call notesleep
// and exactly one thread can call notewakeup (once).
// once notewakeup has been called, the notesleep
// will return. future notesleep will return immediately.
// subsequent noteclear must be called only after
// previous notesleep has returned, e.g. it's disallowed
// to call noteclear straight after notewakeup.
//
// notetsleep is like notesleep but wakes up after
// a given number of nanoseconds even if the event
// has not yet happened. if a goroutine uses notetsleep to
// wake up early, it must wait to call noteclear until it
// can be sure that no other goroutine is calling
// notewakeup.
//
// notesleep/notetsleep are generally called on g0,
// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.
	key uintptr
}
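
// Editorial sketch, not part of the original file: the one-shot note
// protocol described above, under its stated single-sleeper /
// single-waker assumption. noteclear, notesleep, and notewakeup are the
// real runtime primitives (lock_futex.go / lock_sema.go); the surrounding
// function is hypothetical and, like notesleep itself, belongs on g0.
func noteHandshakeSketch(n *note) {
	noteclear(n) // must initialize before any sleep/wakeup
	// Exactly one other thread will call notewakeup(n), once.
	notesleep(n) // returns once notewakeup has been called
	// A later noteclear(n) is only legal now that notesleep has returned.
}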

type funcval struct {
	fn uintptr
	// variable-size, fn-specific data here
}

type iface struct {
	tab  *itab
	data unsafe.Pointer
}

type eface struct {
	_type *_type
	data  unsafe.Pointer
}

func efaceOf(ep *interface{}) *eface {
	return (*eface)(unsafe.Pointer(ep))
}

// The guintptr, muintptr, and puintptr are all used to bypass write barriers.
// It is particularly important to avoid write barriers when the current P has
// been released, because the GC thinks the world is stopped, and an
// unexpected write barrier would not be synchronized with the GC,
// which can lead to a half-executed write barrier that has marked the object
// but not queued it. If the GC skips the object and completes before the
// queuing can occur, it will incorrectly free the object.
//
// We tried using special assignment functions invoked only when not
// holding a running P, but then some updates to a particular memory
// word went through write barriers and some did not. This breaks the
// write barrier shadow checking mode, and it is also scary: better to have
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
// Gs and Ps are always reachable via true pointers in the
// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
//
// Ms are always reachable via true pointers either from allm or
// freem. Unlike Gs and Ps we do free Ms, so it's important that
// nothing ever hold an muintptr across a safe point.

// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
// and in scheduling lists that are manipulated without a P.
//
// The Gobuf.g goroutine pointer is almost always updated by assembly code.
// In one of the few places it is updated by Go code - func save - it must be
// treated as a uintptr to avoid a write barrier being emitted at a bad time.
// Instead of figuring out how to emit the write barriers missing in the
// assembly manipulation, we change the type of the field to uintptr,
// so that it does not require write barriers at all.
//
// Goroutine structs are published in the allg list and never freed.
// That will keep the goroutine structs from being collected.
// There is never a time that Gobuf.g's contain the only references
// to a goroutine: the publishing of the goroutine in allg comes first.
// Goroutine pointers are also kept in non-GC-visible places like TLS,
// so I can't see them ever moving. If we did want to start moving data
// in the GC, we'd need to allocate the goroutine structs from an
// alternate arena. Using guintptr doesn't make that problem any worse.
type guintptr uintptr

//go:nosplit
func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }

//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}

// setGNoWB performs *gp = new without a write barrier.
// For times when it's impractical to use a guintptr.
//go:nosplit
//go:nowritebarrier
func setGNoWB(gp **g, new *g) {
	(*guintptr)(unsafe.Pointer(gp)).set(new)
}

type puintptr uintptr

//go:nosplit
func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }

//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }

// muintptr is a *m that is not tracked by the garbage collector.
//
// Because we do free Ms, there are some additional constraints on
// muintptrs:
//
// 1. Never hold an muintptr locally across a safe point.
//
// 2. Any muintptr in the heap must be owned by the M itself so it can
//    ensure it is not in use when the last true *m is released.
type muintptr uintptr

//go:nosplit
func (mp muintptr) ptr() *m { return (*m)(unsafe.Pointer(mp)) }

//go:nosplit
func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }

// setMNoWB performs *mp = new without a write barrier.
// For times when it's impractical to use an muintptr.
//go:nosplit
//go:nowritebarrier
func setMNoWB(mp **m, new *m) {
	(*muintptr)(unsafe.Pointer(mp)).set(new)
}
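
// Editorial sketch, not part of the original file: how the runtime stores
// a *g through a guintptr so that no write barrier is emitted, per the
// comment above. publishGSketch is a hypothetical helper.
//go:nosplit
func publishGSketch(slot *guintptr, gp *g) {
	slot.set(gp)   // plain uintptr store; invisible to the GC
	_ = slot.ptr() // recover a real *g when one is needed again
}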

type gobuf struct {
	// The offsets of sp, pc, and g are known to (hard-coded in) libmach.
	//
	// ctxt is unusual with respect to GC: it may be a
	// heap-allocated funcval, so GC needs to track it, but it
	// needs to be set and cleared from assembly, where it's
	// difficult to have write barriers. However, ctxt is really a
	// saved, live register, and we only ever exchange it between
	// the real register and the gobuf. Hence, we treat it as a
	// root during stack scanning, which means assembly that saves
	// and restores it doesn't need write barriers. It's still
	// typed as a pointer so that any other writes from Go get
	// write barriers.
	sp   uintptr
	pc   uintptr
	g    guintptr
	ctxt unsafe.Pointer
	ret  sys.Uintreg
	lr   uintptr
	bp   uintptr // for GOEXPERIMENT=framepointer
}

// sudog represents a g in a wait list, such as for sending/receiving
// on a channel.
//
// sudog is necessary because the g ↔ synchronization object relation
// is many-to-many. A g can be on many wait lists, so there may be
// many sudogs for one g; and many gs may be waiting on the same
// synchronization object, so there may be many sudogs for one object.
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
type sudog struct {
	// The following fields are protected by the hchan.lock of the
	// channel this sudog is blocking on. shrinkstack depends on
	// this for sudogs involved in channel ops.

	g *g

	// isSelect indicates g is participating in a select, so
	// g.selectDone must be CAS'd to win the wake-up race.
	isSelect bool
	next     *sudog
	prev     *sudog
	elem     unsafe.Pointer // data element (may point to stack)

	// The following fields are never accessed concurrently.
	// For channels, waitlink is only accessed by g.
	// For semaphores, all fields (including the ones above)
	// are only accessed when holding a semaRoot lock.

	acquiretime int64
	releasetime int64
	ticket      uint32
	parent      *sudog // semaRoot binary tree
	waitlink    *sudog // g.waiting list or semaRoot
	waittail    *sudog // semaRoot
	c           *hchan // channel
}
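
// Editorial sketch, not part of the original file: the sudog pool
// discipline described above. acquireSudog and releaseSudog live in
// proc.go; the wait-queue step is elided because it is owner-specific
// (channel lock or semaRoot lock).
func sudogLifecycleSketch(gp *g) {
	sg := acquireSudog() // never allocate sudogs directly
	sg.g = gp
	// ... link sg into a wait queue under the owning lock, block,
	// then unlink it again ...
	sg.g = nil
	releaseSudog(sg) // all link fields must be nil again here
}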

type libcall struct {
	fn   uintptr
	n    uintptr // number of parameters
	args uintptr // parameters
	r1   uintptr // return values
	r2   uintptr
	err  uintptr // error number
}

// describes how to handle callback
type wincallbackcontext struct {
	gobody       unsafe.Pointer // go function to call
	argsize      uintptr        // callback arguments size (in bytes)
	restorestack uintptr        // adjust stack on return by (in bytes) (386 only)
	cleanstack   bool
}

// Stack describes a Go execution stack.
// The bounds of the stack are exactly [lo, hi),
// with no implicit data structures on either side.
type stack struct {
	lo uintptr
	hi uintptr
}

type g struct {
	// Stack parameters.
	// stack describes the actual stack memory: [stack.lo, stack.hi).
	// stackguard0 is the stack pointer compared in the Go stack growth prologue.
	// It is stack.lo+StackGuard normally, but can be StackPreempt to trigger a preemption.
	// stackguard1 is the stack pointer compared in the C stack growth prologue.
	// It is stack.lo+StackGuard on g0 and gsignal stacks.
	// It is ~0 on other goroutine stacks, to trigger a call to morestackc (and crash).
	stack       stack   // offset known to runtime/cgo
	stackguard0 uintptr // offset known to liblink
	stackguard1 uintptr // offset known to liblink

	_panic       *_panic // innermost panic - offset known to liblink
	_defer       *_defer // innermost defer
	m            *m      // current m; offset known to arm liblink
	sched        gobuf
	syscallsp    uintptr        // if status==Gsyscall, syscallsp = sched.sp to use during gc
	syscallpc    uintptr        // if status==Gsyscall, syscallpc = sched.pc to use during gc
	stktopsp     uintptr        // expected sp at top of stack, to check in traceback
	param        unsafe.Pointer // passed parameter on wakeup
	atomicstatus uint32
	stackLock    uint32 // sigprof/scang lock; TODO: fold in to atomicstatus
	goid         int64
	schedlink    guintptr
	waitsince    int64      // approx time when the g become blocked
	waitreason   waitReason // if status==Gwaiting

	preempt       bool // preemption signal, duplicates stackguard0 = stackpreempt
	preemptStop   bool // transition to _Gpreempted on preemption; otherwise, just deschedule
	preemptShrink bool // shrink stack at synchronous safe point

	// asyncSafePoint is set if g is stopped at an asynchronous
	// safe point. This means there are frames on the stack
	// without precise pointer information.
	asyncSafePoint bool

	paniconfault bool // panic (instead of crash) on unexpected fault address
	gcscandone   bool // g has scanned stack; protected by _Gscan bit in status
	throwsplit   bool // must not split stack
	// activeStackChans indicates that there are unlocked channels
	// pointing into this goroutine's stack. If true, stack
	// copying needs to acquire channel locks to protect these
	// areas of the stack.
	activeStackChans bool

	raceignore     int8     // ignore race detection events
	sysblocktraced bool     // StartTrace has emitted EvGoInSyscall about this goroutine
	sysexitticks   int64    // cputicks when syscall has returned (for tracing)
	traceseq       uint64   // trace event sequencer
	tracelastp     puintptr // last P emitted an event for this goroutine
	lockedm        muintptr
	sig            uint32
	writebuf       []byte
	sigcode0       uintptr
	sigcode1       uintptr
	sigpc          uintptr
	gopc           uintptr         // pc of go statement that created this goroutine
	ancestors      *[]ancestorInfo // ancestor information goroutine(s) that created this goroutine (only used if debug.tracebackancestors)
	startpc        uintptr         // pc of goroutine function
	racectx        uintptr
	waiting        *sudog         // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	cgoCtxt        []uintptr      // cgo traceback context
	labels         unsafe.Pointer // profiler labels
	timer          *timer         // cached timer for time.Sleep
	selectDone     uint32         // are we participating in a select and did someone win the race?

	// Per-G GC state

	// gcAssistBytes is this G's GC assist credit in terms of
	// bytes allocated. If this is positive, then the G has credit
	// to allocate gcAssistBytes bytes without assisting. If this
	// is negative, then the G must correct this by performing
	// scan work. We track this in bytes to make it fast to update
	// and check for debt in the malloc hot path. The assist ratio
	// determines how this corresponds to scan work debt.
	gcAssistBytes int64

	////// Added by q.bryant@live.com for logid @2020.09.10 ///////begin//////
	logid int64
	////// Added by q.bryant@live.com for logid @2020.09.10 ///////end///////
}
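
// Editorial sketch, not part of the original file: the point of the logid
// patch above is that code can read a per-goroutine log ID straight off
// the g, e.g. via a hypothetical accessor like this (the actual
// qoobing.com/gomod/log package presumably reaches it through its own
// go:linkname helpers):
func getgLogidSketch() int64 {
	return getg().logid // getg() returns the current goroutine's g
}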

type m struct {
	g0      *g     // goroutine with scheduling stack
	morebuf gobuf  // gobuf arg to morestack
	divmod  uint32 // div/mod denominator for arm - known to liblink

	// Fields not known to debuggers.
	procid        uint64       // for debuggers, but offset not hard-coded
	gsignal       *g           // signal-handling g
	goSigStack    gsignalStack // Go-allocated signal handling stack
	sigmask       sigset       // storage for saved signal mask
	tls           [6]uintptr   // thread-local storage (for x86 extern register)
	mstartfn      func()
	curg          *g       // current running goroutine
	caughtsig     guintptr // goroutine running during fatal signal
	p             puintptr // attached p for executing go code (nil if not executing go code)
	nextp         puintptr
	oldp          puintptr // the p that was attached before executing a syscall
	id            int64
	mallocing     int32
	throwing      int32
	preemptoff    string // if != "", keep curg running on this m
	locks         int32
	dying         int32
	profilehz     int32
	spinning      bool // m is out of work and is actively looking for work
	blocked       bool // m is blocked on a note
	newSigstack   bool // minit on C thread called sigaltstack
	printlock     int8
	incgo         bool   // m is executing a cgo call
	freeWait      uint32 // if == 0, safe to free g0 and delete m (atomic)
	fastrand      [2]uint32
	needextram    bool
	traceback     uint8
	ncgocall      uint64      // number of cgo calls in total
	ncgo          int32       // number of cgo calls currently in progress
	cgoCallersUse uint32      // if non-zero, cgoCallers in use temporarily
	cgoCallers    *cgoCallers // cgo traceback if crashing in cgo call
	park          note
	alllink       *m // on allm
	schedlink     muintptr
	mcache        *mcache
	lockedg       guintptr
	createstack   [32]uintptr // stack that created this thread.
	lockedExt     uint32      // tracking for external LockOSThread
	lockedInt     uint32      // tracking for internal lockOSThread
	nextwaitm     muintptr    // next m waiting for lock
	waitunlockf   func(*g, unsafe.Pointer) bool
	waitlock      unsafe.Pointer
	waittraceev   byte
	waittraceskip int
	startingtrace bool
	syscalltick   uint32
	freelink      *m // on sched.freem

	// these are here because they are too large to be on the stack
	// of low-level NOSPLIT functions.
	libcall   libcall
	libcallpc uintptr // for cpu profiler
	libcallsp uintptr
	libcallg  guintptr
	syscall   libcall // stores syscall parameters on windows

	vdsoSP uintptr // SP for traceback while in VDSO call (0 if not in call)
	vdsoPC uintptr // PC for traceback while in VDSO call

	// preemptGen counts the number of completed preemption
	// signals. This is used to detect when a preemption is
	// requested, but fails. Accessed atomically.
	preemptGen uint32

	// Whether this is a pending preemption signal on this M.
	// Accessed atomically.
	signalPending uint32

	dlogPerM

	mOS
}

type p struct {
	id          int32
	status      uint32 // one of pidle/prunning/...
	link        puintptr
	schedtick   uint32     // incremented on every scheduler call
	syscalltick uint32     // incremented on every system call
	sysmontick  sysmontick // last tick observed by sysmon
	m           muintptr   // back-link to associated m (nil if idle)
	mcache      *mcache
	pcache      pageCache
	raceprocctx uintptr

	deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
	deferpoolbuf [5][32]*_defer

	// Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
	goidcache    uint64
	goidcacheend uint64

	// Queue of runnable goroutines. Accessed without lock.
	runqhead uint32
	runqtail uint32
	runq     [256]guintptr
	// runnext, if non-nil, is a runnable G that was ready'd by
	// the current G and should be run next instead of what's in
	// runq if there's time remaining in the running G's time
	// slice. It will inherit the time left in the current time
	// slice. If a set of goroutines is locked in a
	// communicate-and-wait pattern, this schedules that set as a
	// unit and eliminates the (potentially large) scheduling
	// latency that otherwise arises from adding the ready'd
	// goroutines to the end of the run queue.
	runnext guintptr

	// Available G's (status == Gdead)
	gFree struct {
		gList
		n int32
	}

	sudogcache []*sudog
	sudogbuf   [128]*sudog

	// Cache of mspan objects from the heap.
	mspancache struct {
		// We need an explicit length here because this field is used
		// in allocation codepaths where write barriers are not allowed,
		// and eliminating the write barrier/keeping it eliminated from
		// slice updates is tricky, more so than just managing the length
		// ourselves.
		len int
		buf [128]*mspan
	}

	tracebuf traceBufPtr

	// traceSweep indicates the sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	traceSweep bool
	// traceSwept and traceReclaimed track the number of bytes
	// swept and reclaimed by sweeping in the current sweep loop.
	traceSwept, traceReclaimed uintptr

	palloc persistentAlloc // per-P to avoid mutex

	_ uint32 // Alignment for atomic fields below

	// The when field of the first entry on the timer heap.
	// This is updated using atomic functions.
	// This is 0 if the timer heap is empty.
	timer0When uint64

	// Per-P GC state
	gcAssistTime         int64    // Nanoseconds in assistAlloc
	gcFractionalMarkTime int64    // Nanoseconds in fractional mark worker (atomic)
	gcBgMarkWorker       guintptr // (atomic)
	gcMarkWorkerMode     gcMarkWorkerMode

	// gcMarkWorkerStartTime is the nanotime() at which this mark
	// worker started.
	gcMarkWorkerStartTime int64

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
	// disposed on certain GC state transitions.
	gcw gcWork

	// wbBuf is this P's GC write barrier buffer.
	//
	// TODO: Consider caching this in the running G.
	wbBuf wbBuf

	runSafePointFn uint32 // if 1, run sched.safePointFn at next safe point

	// Lock for timers. We normally access the timers while running
	// on this P, but the scheduler can also do it from a different P.
	timersLock mutex

	// Actions to take at some time. This is used to implement the
	// standard library's time package.
	// Must hold timersLock to access.
	timers []*timer

	// Number of timers in P's heap.
	// Modified using atomic instructions.
	numTimers uint32

	// Number of timerModifiedEarlier timers on P's heap.
	// This should only be modified while holding timersLock,
	// or while the timer status is in a transient state
	// such as timerModifying.
	adjustTimers uint32

	// Number of timerDeleted timers in P's heap.
	// Modified using atomic instructions.
	deletedTimers uint32

	// Race context used while executing timer functions.
	timerRaceCtx uintptr

	// preempt is set to indicate that this P should enter the
	// scheduler ASAP (regardless of what G is running on it).
	preempt bool

	pad cpu.CacheLinePad
}
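
// Editorial sketch, not part of the original file: how a scheduler honors
// runnext before the local run queue, per the runnext comment above.
// runqgetSketch is a hypothetical, single-owner simplification of runqget
// in proc.go, which must use atomics because other Ps can steal.
func runqgetSketch(pp *p) *g {
	if next := pp.runnext; next != 0 {
		pp.runnext = 0
		return next.ptr() // inherits the remainder of the time slice
	}
	if pp.runqhead == pp.runqtail {
		return nil // local queue is empty
	}
	gp := pp.runq[pp.runqhead%uint32(len(pp.runq))].ptr()
	pp.runqhead++
	return gp
}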

type schedt struct {
	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
	goidgen   uint64
	lastpoll  uint64 // time of last network poll, 0 if currently polling
	pollUntil uint64 // time to which current poll is sleeping

	lock mutex

	// When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
	// sure to call checkdead().

	midle        muintptr // idle m's waiting for work
	nmidle       int32    // number of idle m's waiting for work
	nmidlelocked int32    // number of locked m's waiting for work
	mnext        int64    // number of m's that have been created and next M ID
	maxmcount    int32    // maximum number of m's allowed (or die)
	nmsys        int32    // number of system m's not counted for deadlock
	nmfreed      int64    // cumulative number of freed m's

	ngsys uint32 // number of system goroutines; updated atomically

	pidle      puintptr // idle p's
	npidle     uint32
	nmspinning uint32 // See "Worker thread parking/unparking" comment in proc.go.

	// Global runnable queue.
	runq     gQueue
	runqsize int32

	// disable controls selective disabling of the scheduler.
	//
	// Use schedEnableUser to control this.
	//
	// disable is protected by sched.lock.
	disable struct {
		// user disables scheduling of user goroutines.
		user     bool
		runnable gQueue // pending runnable Gs
		n        int32  // length of runnable
	}

	// Global cache of dead G's.
	gFree struct {
		lock    mutex
		stack   gList // Gs with stacks
		noStack gList // Gs without stacks
		n       int32
	}

	// Central cache of sudog structs.
	sudoglock  mutex
	sudogcache *sudog

	// Central pool of available defer structs of different sizes.
	deferlock mutex
	deferpool [5]*_defer

	// freem is the list of m's waiting to be freed when their
	// m.exited is set. Linked through m.freelink.
	freem *m

	gcwaiting  uint32 // gc is waiting to run
	stopwait   int32
	stopnote   note
	sysmonwait uint32
	sysmonnote note

	// safepointFn should be called on each P at the next GC
	// safepoint if p.runSafePointFn is set.
	safePointFn   func(*p)
	safePointWait int32
	safePointNote note

	profilehz int32 // cpu profiling rate

	procresizetime int64 // nanotime() of last change to gomaxprocs
	totaltime      int64 // ∫gomaxprocs dt up to procresizetime
}

// Values for the flags field of a sigTabT.
const (
	_SigNotify   = 1 << iota // let signal.Notify have signal, even if from kernel
	_SigKill                 // if signal.Notify doesn't take it, exit quietly
	_SigThrow                // if signal.Notify doesn't take it, exit loudly
	_SigPanic                // if the signal is from the kernel, panic
	_SigDefault              // if the signal isn't explicitly requested, don't monitor it
	_SigGoExit               // cause all runtime procs to exit (only used on Plan 9).
	_SigSetStack             // add SA_ONSTACK to libc handler
	_SigUnblock              // always unblock; see blockableSig
	_SigIgn                  // _SIG_DFL action is to ignore the signal
)

// Layout of in-memory per-function information prepared by linker
// See https://golang.org/s/go12symtab.
// Keep in sync with linker (../cmd/link/internal/ld/pcln.go:/pclntab)
// and with package debug/gosym and with symtab.go in package runtime.
type _func struct {
	entry   uintptr // start pc
	nameoff int32   // function name

	args        int32  // in/out args size
	deferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.

	pcsp      int32
	pcfile    int32
	pcln      int32
	npcdata   int32
	funcID    funcID  // set for certain special runtime functions
	_         [2]int8 // unused
	nfuncdata uint8   // must be last
}

// Pseudo-Func that is returned for PCs that occur in inlined code.
// A *Func can be either a *_func or a *funcinl, and they are distinguished
// by the first uintptr.
type funcinl struct {
	zero  uintptr // set to 0 to distinguish from _func
	entry uintptr // entry of the real (the "outermost") frame.
	name  string
	file  string
	line  int
}

// layout of Itab known to compilers
// allocated in non-garbage-collected memory
// Needs to be in sync with
// ../cmd/compile/internal/gc/reflect.go:/^func.dumptabs.
type itab struct {
	inter *interfacetype
	_type *_type
	hash  uint32 // copy of _type.hash. Used for type switches.
	_     [4]byte
	fun   [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
}

// Lock-free stack node.
// Also known to export_test.go.
type lfnode struct {
	next    uint64
	pushcnt uintptr
}

type forcegcstate struct {
	lock mutex
	g    *g
	idle uint32
}

// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte

// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
	if n < 0 {
		n = 0
	}
	for n < len(r) {
		// Extend random bits using hash function & time seed
		w := n
		if w > 16 {
			w = 16
		}
		h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
		for i := 0; i < sys.PtrSize && n < len(r); i++ {
			r[n] = byte(h)
			n++
			h >>= 8
		}
	}
}
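
// Editorial sketch, not part of the original file: a hypothetical caller
// of extendRandom, stretching the (often 16-byte) AT_RANDOM seed in
// startupRandomData across a larger buffer.
func seedBufSketch() [32]byte {
	var buf [32]byte
	n := copy(buf[:], startupRandomData) // may cover only part of buf
	extendRandom(buf[:], n)              // hash-extend over the rest
	return buf
}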

// A _defer holds an entry on the list of deferred calls.
// If you add a field here, add code to clear it in freedefer and deferProcStack
// This struct must match the code in cmd/compile/internal/gc/reflect.go:deferstruct
// and cmd/compile/internal/gc/ssa.go:(*state).call.
// Some defers will be allocated on the stack and some on the heap.
// All defers are logically part of the stack, so write barriers to
// initialize them are not required. All defers must be manually scanned,
// and for heap defers, marked.
type _defer struct {
	siz     int32 // includes both arguments and results
	started bool
	heap    bool
	// openDefer indicates that this _defer is for a frame with open-coded
	// defers. We have only one defer record for the entire frame (which may
	// currently have 0, 1, or more defers active).
	openDefer bool
	sp        uintptr  // sp at time of defer
	pc        uintptr  // pc at time of defer
	fn        *funcval // can be nil for open-coded defers
	_panic    *_panic  // panic that is running defer
	link      *_defer

	// If openDefer is true, the fields below record values about the stack
	// frame and associated function that has the open-coded defer(s). sp
	// above will be the sp for the frame, and pc will be address of the
	// deferreturn call in the function.
	fd   unsafe.Pointer // funcdata for the function associated with the frame
	varp uintptr        // value of varp for the stack frame
	// framepc is the current pc associated with the stack frame. Together,
	// with sp above (which is the sp associated with the stack frame),
	// framepc/sp can be used as pc/sp pair to continue a stack trace via
	// gentraceback().
	framepc uintptr
}

// A _panic holds information about an active panic.
//
// This is marked go:notinheap because _panic values must only ever
// live on the stack.
//
// The argp and link fields are stack pointers, but don't need special
// handling during stack growth: because they are pointer-typed and
// _panic values only live on the stack, regular stack pointer
// adjustment takes care of them.
//
//go:notinheap
type _panic struct {
	argp      unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
	arg       interface{}    // argument to panic
	link      *_panic        // link to earlier panic
	pc        uintptr        // where to return to in runtime if this panic is bypassed
	sp        unsafe.Pointer // where to return to in runtime if this panic is bypassed
	recovered bool           // whether this panic is over
	aborted   bool           // the panic was aborted
	goexit    bool
}

// stack traces
type stkframe struct {
	fn       funcInfo   // function being run
	pc       uintptr    // program counter within fn
	continpc uintptr    // program counter where execution can continue, or 0 if not
	lr       uintptr    // program counter at caller aka link register
	sp       uintptr    // stack pointer at pc
	fp       uintptr    // stack pointer at caller aka frame pointer
	varp     uintptr    // top of local variables
	argp     uintptr    // pointer to function arguments
	arglen   uintptr    // number of bytes at argp
	argmap   *bitvector // force use of this argmap
}

// ancestorInfo records details of where a goroutine was started.
type ancestorInfo struct {
	pcs  []uintptr // pcs from the stack of this goroutine
	goid int64     // goroutine id of this goroutine; original goroutine possibly dead
	gopc uintptr   // pc of go statement that created this goroutine
}

const (
	_TraceRuntimeFrames = 1 << iota // include frames for internal runtime functions.
	_TraceTrap                      // the initial PC, SP are from a trap, not a return PC from a call
	_TraceJumpStack                 // if traceback is on a systemstack, resume trace at g that called into it
)

// The maximum number of frames we print for a traceback
const _TracebackMaxFrames = 100

// A waitReason explains why a goroutine has been stopped.
// See gopark. Do not re-use waitReasons, add new ones.
type waitReason uint8

const (
	waitReasonZero                  waitReason = iota // ""
	waitReasonGCAssistMarking                         // "GC assist marking"
	waitReasonIOWait                                  // "IO wait"
	waitReasonChanReceiveNilChan                      // "chan receive (nil chan)"
	waitReasonChanSendNilChan                         // "chan send (nil chan)"
	waitReasonDumpingHeap                             // "dumping heap"
	waitReasonGarbageCollection                       // "garbage collection"
	waitReasonGarbageCollectionScan                   // "garbage collection scan"
	waitReasonPanicWait                               // "panicwait"
	waitReasonSelect                                  // "select"
	waitReasonSelectNoCases                           // "select (no cases)"
	waitReasonGCAssistWait                            // "GC assist wait"
	waitReasonGCSweepWait                             // "GC sweep wait"
	waitReasonGCScavengeWait                          // "GC scavenge wait"
	waitReasonChanReceive                             // "chan receive"
	waitReasonChanSend                                // "chan send"
	waitReasonFinalizerWait                           // "finalizer wait"
	waitReasonForceGGIdle                             // "force gc (idle)"
	waitReasonSemacquire                              // "semacquire"
	waitReasonSleep                                   // "sleep"
	waitReasonSyncCondWait                            // "sync.Cond.Wait"
	waitReasonTimerGoroutineIdle                      // "timer goroutine (idle)"
	waitReasonTraceReaderBlocked                      // "trace reader (blocked)"
	waitReasonWaitForGCCycle                          // "wait for GC cycle"
	waitReasonGCWorkerIdle                            // "GC worker (idle)"
	waitReasonPreempted                               // "preempted"
)

var waitReasonStrings = [...]string{
	waitReasonZero:                  "",
	waitReasonGCAssistMarking:       "GC assist marking",
	waitReasonIOWait:                "IO wait",
	waitReasonChanReceiveNilChan:    "chan receive (nil chan)",
	waitReasonChanSendNilChan:       "chan send (nil chan)",
	waitReasonDumpingHeap:           "dumping heap",
	waitReasonGarbageCollection:     "garbage collection",
	waitReasonGarbageCollectionScan: "garbage collection scan",
	waitReasonPanicWait:             "panicwait",
	waitReasonSelect:                "select",
	waitReasonSelectNoCases:         "select (no cases)",
	waitReasonGCAssistWait:          "GC assist wait",
	waitReasonGCSweepWait:           "GC sweep wait",
	waitReasonGCScavengeWait:        "GC scavenge wait",
	waitReasonChanReceive:           "chan receive",
	waitReasonChanSend:              "chan send",
	waitReasonFinalizerWait:         "finalizer wait",
	waitReasonForceGGIdle:           "force gc (idle)",
	waitReasonSemacquire:            "semacquire",
	waitReasonSleep:                 "sleep",
	waitReasonSyncCondWait:          "sync.Cond.Wait",
	waitReasonTimerGoroutineIdle:    "timer goroutine (idle)",
	waitReasonTraceReaderBlocked:    "trace reader (blocked)",
	waitReasonWaitForGCCycle:        "wait for GC cycle",
	waitReasonGCWorkerIdle:          "GC worker (idle)",
	waitReasonPreempted:             "preempted",
}

func (w waitReason) String() string {
	if w < 0 || w >= waitReason(len(waitReasonStrings)) {
		return "unknown wait reason"
	}
	return waitReasonStrings[w]
}
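
// Editorial sketch, not part of the original file: waitReason is where the
// bracketed state in a goroutine dump comes from, e.g.
// "goroutine 7 [chan receive]". A hypothetical use of String:
func waitReasonSketch(gp *g) {
	if readgstatus(gp)&^_Gscan == _Gwaiting {
		println(gp.waitreason.String()) // e.g. "chan receive"
	}
}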

var (
	allglen    uintptr
	allm       *m
	allp       []*p  // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
	allpLock   mutex // Protects P-less reads of allp and all writes
	gomaxprocs int32
	ncpu       int32
	forcegc    forcegcstate
	sched      schedt
	newprocs   int32

	// Information about what cpu features are available.
	// Packages outside the runtime should not use these
	// as they are not an external api.
	// Set on startup in asm_{386,amd64}.s
	processorVersionInfo uint32
	isIntel              bool
	lfenceBeforeRdtsc    bool

	goarm                uint8 // set by cmd/link on arm systems
	framepointer_enabled bool  // set by cmd/link
)

// Set by the linker so the runtime can determine the buildmode.
var (
	islibrary bool // -buildmode=c-shared
	isarchive bool // -buildmode=c-archive
)