github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// set using cmd/go/internal/modload.ModInfoProg
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in the near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and we would need to park it.
//    It would also destroy locality of computation, as we want to keep
//    dependent goroutines on the same thread, and it would introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if (1) there is an
// idle P and (2) there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
68 // This approach smooths out unjustified spikes of thread unparking, 69 // but at the same time guarantees eventual maximal CPU parallelism utilization. 70 // 71 // The main implementation complication is that we need to be very careful during 72 // spinning->non-spinning thread transition. This transition can race with submission 73 // of a new goroutine, and either one part or another needs to unpark another worker 74 // thread. If they both fail to do that, we can end up with semi-persistent CPU 75 // underutilization. The general pattern for goroutine readying is: submit a goroutine 76 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. 77 // The general pattern for spinning->non-spinning transition is: decrement nmspinning, 78 // #StoreLoad-style memory barrier, check all per-P work queues for new work. 79 // Note that all this complexity does not apply to global run queue as we are not 80 // sloppy about thread unparking when submitting to global queue. Also see comments 81 // for nmspinning manipulation. 82 83 var ( 84 m0 m 85 g0 g 86 mcache0 *mcache 87 raceprocctx0 uintptr 88 ) 89 90 //go:linkname runtime_inittask runtime..inittask 91 var runtime_inittask initTask 92 93 //go:linkname main_inittask main..inittask 94 var main_inittask initTask 95 96 // main_init_done is a signal used by cgocallbackg that initialization 97 // has been completed. It is made before _cgo_notify_runtime_init_done, 98 // so all cgo calls can rely on it existing. When main_init is complete, 99 // it is closed, meaning cgocallbackg can reliably receive from it. 100 var main_init_done chan bool 101 102 //go:linkname main_main main.main 103 func main_main() 104 105 // mainStarted indicates that the main M has started. 106 var mainStarted bool 107 108 // runtimeInitTime is the nanotime() at which the runtime started. 109 var runtimeInitTime int64 110 111 // Value to use for signal mask for newly created M's. 112 var initSigmask sigset 113 114 // The main goroutine. 115 func main() { 116 g := getg() 117 118 // Racectx of m0->g0 is used only as the parent of the main goroutine. 119 // It must not be used for anything else. 120 g.m.g0.racectx = 0 121 122 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 123 // Using decimal instead of binary GB and MB because 124 // they look nicer in the stack overflow failure message. 125 if sys.PtrSize == 8 { 126 maxstacksize = 1000000000 127 } else { 128 maxstacksize = 250000000 129 } 130 131 // An upper limit for max stack size. Used to avoid random crashes 132 // after calling SetMaxStack and trying to allocate a stack that is too big, 133 // since stackalloc works with 32-bit sizes. 134 maxstackceiling = 2 * maxstacksize 135 136 // Allow newproc to start new Ms. 137 mainStarted = true 138 139 if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon 140 // For runtime_syscall_doAllThreadsSyscall, we 141 // register sysmon is not ready for the world to be 142 // stopped. 143 atomic.Store(&sched.sysmonStarting, 1) 144 systemstack(func() { 145 newm(sysmon, nil, -1) 146 }) 147 } 148 149 // Lock the main goroutine onto this, the main OS thread, 150 // during initialization. Most programs won't care, but a few 151 // do require certain calls to be made by the main thread. 152 // Those can arrange for main.main to run in the main thread 153 // by calling runtime.LockOSThread during initialization 154 // to preserve the lock. 
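// Editorial sketch (not part of the original proc.go): the comment above describes
// how a program can arrange for main.main to run on the main OS thread by calling
// runtime.LockOSThread during initialization. A minimal user-level program using
// that documented pattern might look like the following; the body of main is left
// as a placeholder for whatever main-thread-only work the program needs.
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// Lock the main goroutine to the main OS thread before main.main runs.
//		// Some C libraries (GUI toolkits, for example) require this.
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// ... main-thread-only work goes here ...
//	}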
155 lockOSThread() 156 157 if g.m != &m0 { 158 throw("runtime.main not on m0") 159 } 160 m0.doesPark = true 161 162 // Record when the world started. 163 // Must be before doInit for tracing init. 164 runtimeInitTime = nanotime() 165 if runtimeInitTime == 0 { 166 throw("nanotime returning zero") 167 } 168 169 if debug.inittrace != 0 { 170 inittrace.id = getg().goid 171 inittrace.active = true 172 } 173 174 doInit(&runtime_inittask) // Must be before defer. 175 176 // Defer unlock so that runtime.Goexit during init does the unlock too. 177 needUnlock := true 178 defer func() { 179 if needUnlock { 180 unlockOSThread() 181 } 182 }() 183 184 gcenable() 185 186 main_init_done = make(chan bool) 187 if iscgo { 188 if _cgo_thread_start == nil { 189 throw("_cgo_thread_start missing") 190 } 191 if GOOS != "windows" { 192 if _cgo_setenv == nil { 193 throw("_cgo_setenv missing") 194 } 195 if _cgo_unsetenv == nil { 196 throw("_cgo_unsetenv missing") 197 } 198 } 199 if _cgo_notify_runtime_init_done == nil { 200 throw("_cgo_notify_runtime_init_done missing") 201 } 202 // Start the template thread in case we enter Go from 203 // a C-created thread and need to create a new thread. 204 startTemplateThread() 205 cgocall(_cgo_notify_runtime_init_done, nil) 206 } 207 208 doInit(&main_inittask) 209 210 // Disable init tracing after main init done to avoid overhead 211 // of collecting statistics in malloc and newproc 212 inittrace.active = false 213 214 close(main_init_done) 215 216 needUnlock = false 217 unlockOSThread() 218 219 if isarchive || islibrary { 220 // A program compiled with -buildmode=c-archive or c-shared 221 // has a main, but it is not executed. 222 return 223 } 224 fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 225 fn() 226 if raceenabled { 227 racefini() 228 } 229 230 // Make racy client program work: if panicking on 231 // another goroutine at the same time as main returns, 232 // let the other goroutine finish printing the panic trace. 233 // Once it does, it will exit. See issues 3934 and 20018. 234 if atomic.Load(&runningPanicDefers) != 0 { 235 // Running deferred functions should not take long. 236 for c := 0; c < 1000; c++ { 237 if atomic.Load(&runningPanicDefers) == 0 { 238 break 239 } 240 Gosched() 241 } 242 } 243 if atomic.Load(&panicking) != 0 { 244 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1) 245 } 246 247 exit(0) 248 for { 249 var x *int32 250 *x = 0 251 } 252 } 253 254 // os_beforeExit is called from os.Exit(0). 255 //go:linkname os_beforeExit os.runtime_beforeExit 256 func os_beforeExit() { 257 if raceenabled { 258 racefini() 259 } 260 } 261 262 // start forcegc helper goroutine 263 func init() { 264 go forcegchelper() 265 } 266 267 func forcegchelper() { 268 forcegc.g = getg() 269 lockInit(&forcegc.lock, lockRankForcegc) 270 for { 271 lock(&forcegc.lock) 272 if forcegc.idle != 0 { 273 throw("forcegc: phase error") 274 } 275 atomic.Store(&forcegc.idle, 1) 276 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1) 277 // this goroutine is explicitly resumed by sysmon 278 if debug.gctrace > 0 { 279 println("GC forced") 280 } 281 // Time-triggered, fully concurrent. 282 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) 283 } 284 } 285 286 //go:nosplit 287 288 // Gosched yields the processor, allowing other goroutines to run. It does not 289 // suspend the current goroutine, so execution resumes automatically. 
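// Editorial sketch (not part of the original proc.go): Gosched, documented just above,
// yields the processor without suspending the calling goroutine. With GOMAXPROCS=1 a
// tight CPU-bound loop can delay other goroutines between preemption points, and an
// explicit yield lets them run sooner. A minimal, standard-library-only illustration:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		done := make(chan struct{})
//		go func() {
//			fmt.Println("other goroutine ran")
//			close(done)
//		}()
//		for i := 0; ; i++ {
//			if i%1000 == 0 {
//				runtime.Gosched() // yield the P so the other goroutine can run
//			}
//			select {
//			case <-done:
//				return
//			default:
//			}
//		}
//	}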
290 func Gosched() { 291 checkTimeouts() 292 mcall(gosched_m) 293 } 294 295 // goschedguarded yields the processor like gosched, but also checks 296 // for forbidden states and opts out of the yield in those cases. 297 //go:nosplit 298 func goschedguarded() { 299 mcall(goschedguarded_m) 300 } 301 302 // Puts the current goroutine into a waiting state and calls unlockf on the 303 // system stack. 304 // 305 // If unlockf returns false, the goroutine is resumed. 306 // 307 // unlockf must not access this G's stack, as it may be moved between 308 // the call to gopark and the call to unlockf. 309 // 310 // Note that because unlockf is called after putting the G into a waiting 311 // state, the G may have already been readied by the time unlockf is called 312 // unless there is external synchronization preventing the G from being 313 // readied. If unlockf returns false, it must guarantee that the G cannot be 314 // externally readied. 315 // 316 // Reason explains why the goroutine has been parked. It is displayed in stack 317 // traces and heap dumps. Reasons should be unique and descriptive. Do not 318 // re-use reasons, add new ones. 319 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) { 320 if reason != waitReasonSleep { 321 checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy 322 } 323 mp := acquirem() 324 gp := mp.curg 325 status := readgstatus(gp) 326 if status != _Grunning && status != _Gscanrunning { 327 throw("gopark: bad g status") 328 } 329 mp.waitlock = lock 330 mp.waitunlockf = unlockf 331 gp.waitreason = reason 332 mp.waittraceev = traceEv 333 mp.waittraceskip = traceskip 334 releasem(mp) 335 // can't do anything that might move the G between Ms here. 336 mcall(park_m) 337 } 338 339 // Puts the current goroutine into a waiting state and unlocks the lock. 340 // The goroutine can be made runnable again by calling goready(gp). 341 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) { 342 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 343 } 344 345 func goready(gp *g, traceskip int) { 346 systemstack(func() { 347 ready(gp, traceskip, true) 348 }) 349 } 350 351 //go:nosplit 352 func acquireSudog() *sudog { 353 // Delicate dance: the semaphore implementation calls 354 // acquireSudog, acquireSudog calls new(sudog), 355 // new calls malloc, malloc can call the garbage collector, 356 // and the garbage collector calls the semaphore implementation 357 // in stopTheWorld. 358 // Break the cycle by doing acquirem/releasem around new(sudog). 359 // The acquirem/releasem increments m.locks during new(sudog), 360 // which keeps the garbage collector from being invoked. 361 mp := acquirem() 362 pp := mp.p.ptr() 363 if len(pp.sudogcache) == 0 { 364 lock(&sched.sudoglock) 365 // First, try to grab a batch from central cache. 366 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 367 s := sched.sudogcache 368 sched.sudogcache = s.next 369 s.next = nil 370 pp.sudogcache = append(pp.sudogcache, s) 371 } 372 unlock(&sched.sudoglock) 373 // If the central cache is empty, allocate a new one. 
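// Editorial sketch (not part of the original proc.go): the gopark documentation above
// notes that the wait reason is displayed in stack traces. User code never calls gopark
// directly, but the effect is visible: a goroutine blocked in a channel receive is parked
// with reason "chan receive", which shows up in a runtime.Stack dump.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"time"
//	)
//
//	func main() {
//		ch := make(chan int)
//		go func() { <-ch }() // parks via gopark with reason "chan receive"
//		time.Sleep(100 * time.Millisecond)
//		buf := make([]byte, 1<<16)
//		n := runtime.Stack(buf, true) // all=true: include every goroutine
//		fmt.Printf("%s\n", buf[:n])   // the blocked goroutine appears as "[chan receive]"
//	}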
374 if len(pp.sudogcache) == 0 { 375 pp.sudogcache = append(pp.sudogcache, new(sudog)) 376 } 377 } 378 n := len(pp.sudogcache) 379 s := pp.sudogcache[n-1] 380 pp.sudogcache[n-1] = nil 381 pp.sudogcache = pp.sudogcache[:n-1] 382 if s.elem != nil { 383 throw("acquireSudog: found s.elem != nil in cache") 384 } 385 releasem(mp) 386 return s 387 } 388 389 //go:nosplit 390 func releaseSudog(s *sudog) { 391 if s.elem != nil { 392 throw("runtime: sudog with non-nil elem") 393 } 394 if s.isSelect { 395 throw("runtime: sudog with non-false isSelect") 396 } 397 if s.next != nil { 398 throw("runtime: sudog with non-nil next") 399 } 400 if s.prev != nil { 401 throw("runtime: sudog with non-nil prev") 402 } 403 if s.waitlink != nil { 404 throw("runtime: sudog with non-nil waitlink") 405 } 406 if s.c != nil { 407 throw("runtime: sudog with non-nil c") 408 } 409 gp := getg() 410 if gp.param != nil { 411 throw("runtime: releaseSudog with non-nil gp.param") 412 } 413 mp := acquirem() // avoid rescheduling to another P 414 pp := mp.p.ptr() 415 if len(pp.sudogcache) == cap(pp.sudogcache) { 416 // Transfer half of local cache to the central cache. 417 var first, last *sudog 418 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 419 n := len(pp.sudogcache) 420 p := pp.sudogcache[n-1] 421 pp.sudogcache[n-1] = nil 422 pp.sudogcache = pp.sudogcache[:n-1] 423 if first == nil { 424 first = p 425 } else { 426 last.next = p 427 } 428 last = p 429 } 430 lock(&sched.sudoglock) 431 last.next = sched.sudogcache 432 sched.sudogcache = first 433 unlock(&sched.sudoglock) 434 } 435 pp.sudogcache = append(pp.sudogcache, s) 436 releasem(mp) 437 } 438 439 // funcPC returns the entry PC of the function f. 440 // It assumes that f is a func value. Otherwise the behavior is undefined. 441 // CAREFUL: In programs with plugins, funcPC can return different values 442 // for the same function (because there are actually multiple copies of 443 // the same function in the address space). To be safe, don't use the 444 // results of this function in any == expression. It is only safe to 445 // use the result as an address at which to start executing code. 446 //go:nosplit 447 func funcPC(f interface{}) uintptr { 448 return *(*uintptr)(efaceOf(&f).data) 449 } 450 451 // called from assembly 452 func badmcall(fn func(*g)) { 453 throw("runtime: mcall called on m->g0 stack") 454 } 455 456 func badmcall2(fn func(*g)) { 457 throw("runtime: mcall function returned") 458 } 459 460 func badreflectcall() { 461 panic(plainError("arg size to reflect.call more than 1GB")) 462 } 463 464 var badmorestackg0Msg = "fatal: morestack on g0\n" 465 466 //go:nosplit 467 //go:nowritebarrierrec 468 func badmorestackg0() { 469 sp := stringStructOf(&badmorestackg0Msg) 470 write(2, sp.str, int32(sp.len)) 471 } 472 473 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n" 474 475 //go:nosplit 476 //go:nowritebarrierrec 477 func badmorestackgsignal() { 478 sp := stringStructOf(&badmorestackgsignalMsg) 479 write(2, sp.str, int32(sp.len)) 480 } 481 482 //go:nosplit 483 func badctxt() { 484 throw("ctxt != 0") 485 } 486 487 func lockedOSThread() bool { 488 gp := getg() 489 return gp.lockedm != 0 && gp.m.lockedg != 0 490 } 491 492 var ( 493 // allgs contains all Gs ever created (including dead Gs), and thus 494 // never shrinks. 495 // 496 // Access via the slice is protected by allglock or stop-the-world. 497 // Readers that cannot take the lock may (carefully!) use the atomic 498 // variables below. 
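// Editorial sketch (not part of the original proc.go): funcPC above is runtime-internal,
// but ordinary Go code can obtain a function's entry PC with reflect and resolve it with
// runtime.FuncForPC. The same caveat applies as in the funcPC comment: in programs that
// load plugins, do not compare such PCs for equality.
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//		"runtime"
//	)
//
//	func hello() {}
//
//	func main() {
//		pc := reflect.ValueOf(hello).Pointer() // code pointer of the func value
//		fn := runtime.FuncForPC(pc)
//		fmt.Println(fn.Name()) // prints "main.hello"
//	}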
499 allglock mutex 500 allgs []*g 501 502 // allglen and allgptr are atomic variables that contain len(allg) and 503 // &allg[0] respectively. Proper ordering depends on totally-ordered 504 // loads and stores. Writes are protected by allglock. 505 // 506 // allgptr is updated before allglen. Readers should read allglen 507 // before allgptr to ensure that allglen is always <= len(allgptr). New 508 // Gs appended during the race can be missed. For a consistent view of 509 // all Gs, allglock must be held. 510 // 511 // allgptr copies should always be stored as a concrete type or 512 // unsafe.Pointer, not uintptr, to ensure that GC can still reach it 513 // even if it points to a stale array. 514 allglen uintptr 515 allgptr **g 516 ) 517 518 func allgadd(gp *g) { 519 if readgstatus(gp) == _Gidle { 520 throw("allgadd: bad status Gidle") 521 } 522 523 lock(&allglock) 524 allgs = append(allgs, gp) 525 if &allgs[0] != allgptr { 526 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0])) 527 } 528 atomic.Storeuintptr(&allglen, uintptr(len(allgs))) 529 unlock(&allglock) 530 } 531 532 // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex. 533 func atomicAllG() (**g, uintptr) { 534 length := atomic.Loaduintptr(&allglen) 535 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr))) 536 return ptr, length 537 } 538 539 // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG. 540 func atomicAllGIndex(ptr **g, i uintptr) *g { 541 return *(**g)(add(unsafe.Pointer(ptr), i*sys.PtrSize)) 542 } 543 544 const ( 545 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 546 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 547 _GoidCacheBatch = 16 548 ) 549 550 // cpuinit extracts the environment variable GODEBUG from the environment on 551 // Unix-like operating systems and calls internal/cpu.Initialize. 552 func cpuinit() { 553 const prefix = "GODEBUG=" 554 var env string 555 556 switch GOOS { 557 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux": 558 cpu.DebugOptions = true 559 560 // Similar to goenv_unix but extracts the environment value for 561 // GODEBUG directly. 562 // TODO(moehrmann): remove when general goenvs() can be called before cpuinit() 563 n := int32(0) 564 for argv_index(argv, argc+1+n) != nil { 565 n++ 566 } 567 568 for i := int32(0); i < n; i++ { 569 p := argv_index(argv, argc+1+i) 570 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)})) 571 572 if hasPrefix(s, prefix) { 573 env = gostring(p)[len(prefix):] 574 break 575 } 576 } 577 } 578 579 cpu.Initialize(env) 580 581 // Support cpu feature variables are used in code generated by the compiler 582 // to guard execution of instructions that can not be assumed to be always supported. 583 x86HasPOPCNT = cpu.X86.HasPOPCNT 584 x86HasSSE41 = cpu.X86.HasSSE41 585 x86HasFMA = cpu.X86.HasFMA 586 587 armHasVFPv4 = cpu.ARM.HasVFPv4 588 589 arm64HasATOMICS = cpu.ARM64.HasATOMICS 590 } 591 592 // The bootstrap sequence is: 593 // 594 // call osinit 595 // call schedinit 596 // make & queue new G 597 // call runtime·mstart 598 // 599 // The new G calls runtime·main. 
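// Editorial sketch (not part of the original proc.go): cpuinit above has to scan the raw
// environment block for the GODEBUG variable because it runs before goenvs. Ordinary Go
// code would simply call os.Getenv("GODEBUG"); the prefix-scanning shape of cpuinit looks
// roughly like this at user level:
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//		"strings"
//	)
//
//	func main() {
//		const prefix = "GODEBUG="
//		var val string
//		for _, kv := range os.Environ() {
//			if strings.HasPrefix(kv, prefix) {
//				val = kv[len(prefix):]
//				break
//			}
//		}
//		fmt.Printf("GODEBUG=%q\n", val) // equivalent to os.Getenv("GODEBUG")
//	}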
600 func schedinit() { 601 lockInit(&sched.lock, lockRankSched) 602 lockInit(&sched.sysmonlock, lockRankSysmon) 603 lockInit(&sched.deferlock, lockRankDefer) 604 lockInit(&sched.sudoglock, lockRankSudog) 605 lockInit(&deadlock, lockRankDeadlock) 606 lockInit(&paniclk, lockRankPanic) 607 lockInit(&allglock, lockRankAllg) 608 lockInit(&allpLock, lockRankAllp) 609 lockInit(&reflectOffs.lock, lockRankReflectOffs) 610 lockInit(&finlock, lockRankFin) 611 lockInit(&trace.bufLock, lockRankTraceBuf) 612 lockInit(&trace.stringsLock, lockRankTraceStrings) 613 lockInit(&trace.lock, lockRankTrace) 614 lockInit(&cpuprof.lock, lockRankCpuprof) 615 lockInit(&trace.stackTab.lock, lockRankTraceStackTab) 616 // Enforce that this lock is always a leaf lock. 617 // All of this lock's critical sections should be 618 // extremely short. 619 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank) 620 621 // raceinit must be the first call to race detector. 622 // In particular, it must be done before mallocinit below calls racemapshadow. 623 _g_ := getg() 624 if raceenabled { 625 _g_.racectx, raceprocctx0 = raceinit() 626 } 627 628 sched.maxmcount = 10000 629 630 // The world starts stopped. 631 worldStopped() 632 633 moduledataverify() 634 stackinit() 635 mallocinit() 636 fastrandinit() // must run before mcommoninit 637 mcommoninit(_g_.m, -1) 638 cpuinit() // must run before alginit 639 alginit() // maps must not be used before this call 640 modulesinit() // provides activeModules 641 typelinksinit() // uses maps, activeModules 642 itabsinit() // uses activeModules 643 644 sigsave(&_g_.m.sigmask) 645 initSigmask = _g_.m.sigmask 646 647 goargs() 648 goenvs() 649 parsedebugvars() 650 gcinit() 651 652 lock(&sched.lock) 653 sched.lastpoll = uint64(nanotime()) 654 procs := ncpu 655 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { 656 procs = n 657 } 658 if procresize(procs) != nil { 659 throw("unknown runnable goroutine during bootstrap") 660 } 661 unlock(&sched.lock) 662 663 // World is effectively started now, as P's can run. 664 worldStarted() 665 666 // For cgocheck > 1, we turn on the write barrier at all times 667 // and check all pointer writes. We can't do this until after 668 // procresize because the write barrier needs a P. 669 if debug.cgocheck > 1 { 670 writeBarrier.cgo = true 671 writeBarrier.enabled = true 672 for _, p := range allp { 673 p.wbBuf.reset() 674 } 675 } 676 677 if buildVersion == "" { 678 // Condition should never trigger. This code just serves 679 // to ensure runtime·buildVersion is kept in the resulting binary. 680 buildVersion = "unknown" 681 } 682 if len(modinfo) == 1 { 683 // Condition should never trigger. This code just serves 684 // to ensure runtime·modinfo is kept in the resulting binary. 685 modinfo = "" 686 } 687 } 688 689 func dumpgstatus(gp *g) { 690 _g_ := getg() 691 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 692 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 693 } 694 695 // sched.lock must be held. 696 func checkmcount() { 697 assertLockHeld(&sched.lock) 698 699 if mcount() > sched.maxmcount { 700 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 701 throw("thread exhaustion") 702 } 703 } 704 705 // mReserveID returns the next ID to use for a new m. This new m is immediately 706 // considered 'running' by checkdead. 707 // 708 // sched.lock must be held. 
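// Editorial sketch (not part of the original proc.go): schedinit above sets the number of
// Ps from ncpu or the GOMAXPROCS environment variable via procresize. User code can query
// or change that value through runtime.GOMAXPROCS; changing it later goes through a
// stop-the-world and another procresize.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// GOMAXPROCS(0) queries the current value without changing it.
//		fmt.Println("NumCPU:", runtime.NumCPU())
//		fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
//
//		prev := runtime.GOMAXPROCS(2) // resize the P set; returns the previous setting
//		fmt.Println("previous:", prev)
//	}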
709 func mReserveID() int64 { 710 assertLockHeld(&sched.lock) 711 712 if sched.mnext+1 < sched.mnext { 713 throw("runtime: thread ID overflow") 714 } 715 id := sched.mnext 716 sched.mnext++ 717 checkmcount() 718 return id 719 } 720 721 // Pre-allocated ID may be passed as 'id', or omitted by passing -1. 722 func mcommoninit(mp *m, id int64) { 723 _g_ := getg() 724 725 // g0 stack won't make sense for user (and is not necessary unwindable). 726 if _g_ != _g_.m.g0 { 727 callers(1, mp.createstack[:]) 728 } 729 730 lock(&sched.lock) 731 732 if id >= 0 { 733 mp.id = id 734 } else { 735 mp.id = mReserveID() 736 } 737 738 mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed)) 739 mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed)) 740 if mp.fastrand[0]|mp.fastrand[1] == 0 { 741 mp.fastrand[1] = 1 742 } 743 744 mpreinit(mp) 745 if mp.gsignal != nil { 746 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 747 } 748 749 // Add to allm so garbage collector doesn't free g->m 750 // when it is just in a register or thread-local storage. 751 mp.alllink = allm 752 753 // NumCgoCall() iterates over allm w/o schedlock, 754 // so we need to publish it safely. 755 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 756 unlock(&sched.lock) 757 758 // Allocate memory to hold a cgo traceback if the cgo call crashes. 759 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" { 760 mp.cgoCallers = new(cgoCallers) 761 } 762 } 763 764 var fastrandseed uintptr 765 766 func fastrandinit() { 767 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:] 768 getRandomData(s) 769 } 770 771 // Mark gp ready to run. 772 func ready(gp *g, traceskip int, next bool) { 773 if trace.enabled { 774 traceGoUnpark(gp, traceskip) 775 } 776 777 status := readgstatus(gp) 778 779 // Mark runnable. 780 _g_ := getg() 781 mp := acquirem() // disable preemption because it can be holding p in a local var 782 if status&^_Gscan != _Gwaiting { 783 dumpgstatus(gp) 784 throw("bad g->status in ready") 785 } 786 787 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 788 casgstatus(gp, _Gwaiting, _Grunnable) 789 runqput(_g_.m.p.ptr(), gp, next) 790 wakep() 791 releasem(mp) 792 } 793 794 // freezeStopWait is a large value that freezetheworld sets 795 // sched.stopwait to in order to request that all Gs permanently stop. 796 const freezeStopWait = 0x7fffffff 797 798 // freezing is set to non-zero if the runtime is trying to freeze the 799 // world. 800 var freezing uint32 801 802 // Similar to stopTheWorld but best-effort and can be called several times. 803 // There is no reverse operation, used during crashing. 804 // This function must not lock any mutexes. 805 func freezetheworld() { 806 atomic.Store(&freezing, 1) 807 // stopwait and preemption requests can be lost 808 // due to races with concurrently executing threads, 809 // so try several times 810 for i := 0; i < 5; i++ { 811 // this should tell the scheduler to not start any new goroutines 812 sched.stopwait = freezeStopWait 813 atomic.Store(&sched.gcwaiting, 1) 814 // this should stop running goroutines 815 if !preemptall() { 816 break // no running goroutines 817 } 818 usleep(1000) 819 } 820 // to be sure 821 usleep(1000) 822 preemptall() 823 usleep(1000) 824 } 825 826 // All reads and writes of g's status go through readgstatus, casgstatus 827 // castogscanstatus, casfrom_Gscanstatus. 
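// Editorial sketch (not part of the original proc.go): mReserveID above guards against
// thread-ID overflow with the idiom "if x+1 < x". Signed integer overflow wraps in Go, so
// the comparison detects the wrap-around before the counter is advanced. A standalone
// illustration of the same check:
//
//	package main
//
//	import (
//		"fmt"
//		"math"
//	)
//
//	func nextID(n *int64) int64 {
//		if *n+1 < *n { // the next increment would wrap around
//			panic("ID overflow")
//		}
//		id := *n
//		*n++
//		return id
//	}
//
//	func main() {
//		n := int64(math.MaxInt64 - 1)
//		fmt.Println(nextID(&n)) // ok
//		fmt.Println(nextID(&n)) // panics: next increment would overflow
//	}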
828 //go:nosplit 829 func readgstatus(gp *g) uint32 { 830 return atomic.Load(&gp.atomicstatus) 831 } 832 833 // The Gscanstatuses are acting like locks and this releases them. 834 // If it proves to be a performance hit we should be able to make these 835 // simple atomic stores but for now we are going to throw if 836 // we see an inconsistent state. 837 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 838 success := false 839 840 // Check that transition is valid. 841 switch oldval { 842 default: 843 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 844 dumpgstatus(gp) 845 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 846 case _Gscanrunnable, 847 _Gscanwaiting, 848 _Gscanrunning, 849 _Gscansyscall, 850 _Gscanpreempted: 851 if newval == oldval&^_Gscan { 852 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 853 } 854 } 855 if !success { 856 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 857 dumpgstatus(gp) 858 throw("casfrom_Gscanstatus: gp->status is not in scan state") 859 } 860 releaseLockRank(lockRankGscan) 861 } 862 863 // This will return false if the gp is not in the expected status and the cas fails. 864 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 865 func castogscanstatus(gp *g, oldval, newval uint32) bool { 866 switch oldval { 867 case _Grunnable, 868 _Grunning, 869 _Gwaiting, 870 _Gsyscall: 871 if newval == oldval|_Gscan { 872 r := atomic.Cas(&gp.atomicstatus, oldval, newval) 873 if r { 874 acquireLockRank(lockRankGscan) 875 } 876 return r 877 878 } 879 } 880 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 881 throw("castogscanstatus") 882 panic("not reached") 883 } 884 885 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 886 // and casfrom_Gscanstatus instead. 887 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 888 // put it in the Gscan state is finished. 889 //go:nosplit 890 func casgstatus(gp *g, oldval, newval uint32) { 891 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 892 systemstack(func() { 893 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 894 throw("casgstatus: bad incoming values") 895 }) 896 } 897 898 acquireLockRank(lockRankGscan) 899 releaseLockRank(lockRankGscan) 900 901 // See https://golang.org/cl/21503 for justification of the yield delay. 902 const yieldDelay = 5 * 1000 903 var nextYield int64 904 905 // loop if gp->atomicstatus is in a scan state giving 906 // GC time to finish and change the state to oldval. 907 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { 908 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 909 throw("casgstatus: waiting for Gwaiting but is Grunnable") 910 } 911 if i == 0 { 912 nextYield = nanotime() + yieldDelay 913 } 914 if nanotime() < nextYield { 915 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { 916 procyield(1) 917 } 918 } else { 919 osyield() 920 nextYield = nanotime() + yieldDelay/2 921 } 922 } 923 } 924 925 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 926 // Returns old status. Cannot call casgstatus directly, because we are racing with an 927 // async wakeup that might come in from netpoll. 
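// Editorial sketch (not part of the original proc.go): casgstatus above retries its CAS
// with a short busy spin (procyield) before falling back to yielding the OS thread
// (osyield). A rough user-level analogue of that backoff shape, using sync/atomic and
// runtime.Gosched in place of the runtime-internal primitives (the iteration count is
// arbitrary):
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	func casWithBackoff(addr *uint32, old, new uint32) {
//		for i := 0; !atomic.CompareAndSwapUint32(addr, old, new); i++ {
//			if i < 10 {
//				continue // brief busy spin: the value is expected to change very soon
//			}
//			runtime.Gosched() // stop burning CPU and let other goroutines run
//		}
//	}
//
//	func main() {
//		var state uint32 = 1
//		casWithBackoff(&state, 1, 2)
//	}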
If we see Gwaiting from the readgstatus, 928 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 929 // it would loop waiting for the status to go back to Gwaiting, which it never will. 930 //go:nosplit 931 func casgcopystack(gp *g) uint32 { 932 for { 933 oldstatus := readgstatus(gp) &^ _Gscan 934 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 935 throw("copystack: bad status, not Gwaiting or Grunnable") 936 } 937 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 938 return oldstatus 939 } 940 } 941 } 942 943 // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted. 944 // 945 // TODO(austin): This is the only status operation that both changes 946 // the status and locks the _Gscan bit. Rethink this. 947 func casGToPreemptScan(gp *g, old, new uint32) { 948 if old != _Grunning || new != _Gscan|_Gpreempted { 949 throw("bad g transition") 950 } 951 acquireLockRank(lockRankGscan) 952 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) { 953 } 954 } 955 956 // casGFromPreempted attempts to transition gp from _Gpreempted to 957 // _Gwaiting. If successful, the caller is responsible for 958 // re-scheduling gp. 959 func casGFromPreempted(gp *g, old, new uint32) bool { 960 if old != _Gpreempted || new != _Gwaiting { 961 throw("bad g transition") 962 } 963 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting) 964 } 965 966 // stopTheWorld stops all P's from executing goroutines, interrupting 967 // all goroutines at GC safe points and records reason as the reason 968 // for the stop. On return, only the current goroutine's P is running. 969 // stopTheWorld must not be called from a system stack and the caller 970 // must not hold worldsema. The caller must call startTheWorld when 971 // other P's should resume execution. 972 // 973 // stopTheWorld is safe for multiple goroutines to call at the 974 // same time. Each will execute its own stop, and the stops will 975 // be serialized. 976 // 977 // This is also used by routines that do stack dumps. If the system is 978 // in panic or being exited, this may not reliably stop all 979 // goroutines. 980 func stopTheWorld(reason string) { 981 semacquire(&worldsema) 982 gp := getg() 983 gp.m.preemptoff = reason 984 systemstack(func() { 985 // Mark the goroutine which called stopTheWorld preemptible so its 986 // stack may be scanned. 987 // This lets a mark worker scan us while we try to stop the world 988 // since otherwise we could get in a mutual preemption deadlock. 989 // We must not modify anything on the G stack because a stack shrink 990 // may occur. A stack shrink is otherwise OK though because in order 991 // to return from this function (and to leave the system stack) we 992 // must have preempted all goroutines, including any attempting 993 // to scan our stack, in which case, any stack shrinking will 994 // have already completed by the time we exit. 995 casgstatus(gp, _Grunning, _Gwaiting) 996 stopTheWorldWithSema() 997 casgstatus(gp, _Gwaiting, _Grunning) 998 }) 999 } 1000 1001 // startTheWorld undoes the effects of stopTheWorld. 1002 func startTheWorld() { 1003 systemstack(func() { startTheWorldWithSema(false) }) 1004 1005 // worldsema must be held over startTheWorldWithSema to ensure 1006 // gomaxprocs cannot change while worldsema is held. 1007 // 1008 // Release worldsema with direct handoff to the next waiter, but 1009 // acquirem so that semrelease1 doesn't try to yield our time. 1010 // 1011 // Otherwise if e.g. 
ReadMemStats is being called in a loop, 1012 // it might stomp on other attempts to stop the world, such as 1013 // for starting or ending GC. The operation this blocks is 1014 // so heavy-weight that we should just try to be as fair as 1015 // possible here. 1016 // 1017 // We don't want to just allow us to get preempted between now 1018 // and releasing the semaphore because then we keep everyone 1019 // (including, for example, GCs) waiting longer. 1020 mp := acquirem() 1021 mp.preemptoff = "" 1022 semrelease1(&worldsema, true, 0) 1023 releasem(mp) 1024 } 1025 1026 // stopTheWorldGC has the same effect as stopTheWorld, but blocks 1027 // until the GC is not running. It also blocks a GC from starting 1028 // until startTheWorldGC is called. 1029 func stopTheWorldGC(reason string) { 1030 semacquire(&gcsema) 1031 stopTheWorld(reason) 1032 } 1033 1034 // startTheWorldGC undoes the effects of stopTheWorldGC. 1035 func startTheWorldGC() { 1036 startTheWorld() 1037 semrelease(&gcsema) 1038 } 1039 1040 // Holding worldsema grants an M the right to try to stop the world. 1041 var worldsema uint32 = 1 1042 1043 // Holding gcsema grants the M the right to block a GC, and blocks 1044 // until the current GC is done. In particular, it prevents gomaxprocs 1045 // from changing concurrently. 1046 // 1047 // TODO(mknyszek): Once gomaxprocs and the execution tracer can handle 1048 // being changed/enabled during a GC, remove this. 1049 var gcsema uint32 = 1 1050 1051 // stopTheWorldWithSema is the core implementation of stopTheWorld. 1052 // The caller is responsible for acquiring worldsema and disabling 1053 // preemption first and then should stopTheWorldWithSema on the system 1054 // stack: 1055 // 1056 // semacquire(&worldsema, 0) 1057 // m.preemptoff = "reason" 1058 // systemstack(stopTheWorldWithSema) 1059 // 1060 // When finished, the caller must either call startTheWorld or undo 1061 // these three operations separately: 1062 // 1063 // m.preemptoff = "" 1064 // systemstack(startTheWorldWithSema) 1065 // semrelease(&worldsema) 1066 // 1067 // It is allowed to acquire worldsema once and then execute multiple 1068 // startTheWorldWithSema/stopTheWorldWithSema pairs. 1069 // Other P's are able to execute between successive calls to 1070 // startTheWorldWithSema and stopTheWorldWithSema. 1071 // Holding worldsema causes any other goroutines invoking 1072 // stopTheWorld to block. 1073 func stopTheWorldWithSema() { 1074 _g_ := getg() 1075 1076 // If we hold a lock, then we won't be able to stop another M 1077 // that is blocked trying to acquire the lock. 1078 if _g_.m.locks > 0 { 1079 throw("stopTheWorld: holding locks") 1080 } 1081 1082 lock(&sched.lock) 1083 sched.stopwait = gomaxprocs 1084 atomic.Store(&sched.gcwaiting, 1) 1085 preemptall() 1086 // stop current P 1087 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 
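// Editorial sketch (not part of the original proc.go): the comment above uses ReadMemStats
// as the example of a caller that stops the world. From user code, a single call is cheap,
// but calling it in a tight loop competes with GC for the world-stop, which is exactly the
// fairness concern described above.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var ms runtime.MemStats
//		runtime.ReadMemStats(&ms) // briefly stops the world
//		fmt.Println("heap in use:", ms.HeapInuse, "bytes")
//		fmt.Println("GC STW pause total:", ms.PauseTotalNs, "ns")
//	}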
1088 sched.stopwait-- 1089 // try to retake all P's in Psyscall status 1090 for _, p := range allp { 1091 s := p.status 1092 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 1093 if trace.enabled { 1094 traceGoSysBlock(p) 1095 traceProcStop(p) 1096 } 1097 p.syscalltick++ 1098 sched.stopwait-- 1099 } 1100 } 1101 // stop idle P's 1102 for { 1103 p := pidleget() 1104 if p == nil { 1105 break 1106 } 1107 p.status = _Pgcstop 1108 sched.stopwait-- 1109 } 1110 wait := sched.stopwait > 0 1111 unlock(&sched.lock) 1112 1113 // wait for remaining P's to stop voluntarily 1114 if wait { 1115 for { 1116 // wait for 100us, then try to re-preempt in case of any races 1117 if notetsleep(&sched.stopnote, 100*1000) { 1118 noteclear(&sched.stopnote) 1119 break 1120 } 1121 preemptall() 1122 } 1123 } 1124 1125 // sanity checks 1126 bad := "" 1127 if sched.stopwait != 0 { 1128 bad = "stopTheWorld: not stopped (stopwait != 0)" 1129 } else { 1130 for _, p := range allp { 1131 if p.status != _Pgcstop { 1132 bad = "stopTheWorld: not stopped (status != _Pgcstop)" 1133 } 1134 } 1135 } 1136 if atomic.Load(&freezing) != 0 { 1137 // Some other thread is panicking. This can cause the 1138 // sanity checks above to fail if the panic happens in 1139 // the signal handler on a stopped thread. Either way, 1140 // we should halt this thread. 1141 lock(&deadlock) 1142 lock(&deadlock) 1143 } 1144 if bad != "" { 1145 throw(bad) 1146 } 1147 1148 worldStopped() 1149 } 1150 1151 func startTheWorldWithSema(emitTraceEvent bool) int64 { 1152 assertWorldStopped() 1153 1154 mp := acquirem() // disable preemption because it can be holding p in a local var 1155 if netpollinited() { 1156 list := netpoll(0) // non-blocking 1157 injectglist(&list) 1158 } 1159 lock(&sched.lock) 1160 1161 procs := gomaxprocs 1162 if newprocs != 0 { 1163 procs = newprocs 1164 newprocs = 0 1165 } 1166 p1 := procresize(procs) 1167 sched.gcwaiting = 0 1168 if sched.sysmonwait != 0 { 1169 sched.sysmonwait = 0 1170 notewakeup(&sched.sysmonnote) 1171 } 1172 unlock(&sched.lock) 1173 1174 worldStarted() 1175 1176 for p1 != nil { 1177 p := p1 1178 p1 = p1.link.ptr() 1179 if p.m != 0 { 1180 mp := p.m.ptr() 1181 p.m = 0 1182 if mp.nextp != 0 { 1183 throw("startTheWorld: inconsistent mp->nextp") 1184 } 1185 mp.nextp.set(p) 1186 notewakeup(&mp.park) 1187 } else { 1188 // Start M to run P. Do not start another M below. 1189 newm(nil, p, -1) 1190 } 1191 } 1192 1193 // Capture start-the-world time before doing clean-up tasks. 1194 startTime := nanotime() 1195 if emitTraceEvent { 1196 traceGCSTWDone() 1197 } 1198 1199 // Wakeup an additional proc in case we have excessive runnable goroutines 1200 // in local queues or in the global queue. If we don't, the proc will park itself. 1201 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1202 wakep() 1203 1204 releasem(mp) 1205 1206 return startTime 1207 } 1208 1209 // usesLibcall indicates whether this runtime performs system calls 1210 // via libcall. 1211 func usesLibcall() bool { 1212 switch GOOS { 1213 case "aix", "darwin", "illumos", "ios", "solaris", "windows": 1214 return true 1215 case "openbsd": 1216 return GOARCH == "amd64" || GOARCH == "arm64" 1217 } 1218 return false 1219 } 1220 1221 // mStackIsSystemAllocated indicates whether this runtime starts on a 1222 // system-allocated stack. 
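// Editorial sketch (not part of the original proc.go): usesLibcall and
// mStackIsSystemAllocated above are platform predicates keyed on GOOS and refined by
// GOARCH. User code can follow the same shape with the runtime.GOOS/GOARCH constants;
// the OS and arch sets below are only an example, not the runtime's own lists.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func needsSpecialHandling() bool {
//		switch runtime.GOOS {
//		case "darwin", "windows":
//			return true
//		case "openbsd":
//			return runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64"
//		}
//		return false
//	}
//
//	func main() {
//		fmt.Println(runtime.GOOS, runtime.GOARCH, needsSpecialHandling())
//	}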
1223 func mStackIsSystemAllocated() bool { 1224 switch GOOS { 1225 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows": 1226 return true 1227 case "openbsd": 1228 switch GOARCH { 1229 case "amd64", "arm64": 1230 return true 1231 } 1232 } 1233 return false 1234 } 1235 1236 // mstart is the entry-point for new Ms. 1237 // 1238 // This must not split the stack because we may not even have stack 1239 // bounds set up yet. 1240 // 1241 // May run during STW (because it doesn't have a P yet), so write 1242 // barriers are not allowed. 1243 // 1244 //go:nosplit 1245 //go:nowritebarrierrec 1246 func mstart() { 1247 _g_ := getg() 1248 1249 osStack := _g_.stack.lo == 0 1250 if osStack { 1251 // Initialize stack bounds from system stack. 1252 // Cgo may have left stack size in stack.hi. 1253 // minit may update the stack bounds. 1254 // 1255 // Note: these bounds may not be very accurate. 1256 // We set hi to &size, but there are things above 1257 // it. The 1024 is supposed to compensate this, 1258 // but is somewhat arbitrary. 1259 size := _g_.stack.hi 1260 if size == 0 { 1261 size = 8192 * sys.StackGuardMultiplier 1262 } 1263 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1264 _g_.stack.lo = _g_.stack.hi - size + 1024 1265 } 1266 // Initialize stack guard so that we can start calling regular 1267 // Go code. 1268 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1269 // This is the g0, so we can also call go:systemstack 1270 // functions, which check stackguard1. 1271 _g_.stackguard1 = _g_.stackguard0 1272 mstart1() 1273 1274 // Exit this thread. 1275 if mStackIsSystemAllocated() { 1276 // Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate 1277 // the stack, but put it in _g_.stack before mstart, 1278 // so the logic above hasn't set osStack yet. 1279 osStack = true 1280 } 1281 mexit(osStack) 1282 } 1283 1284 func mstart1() { 1285 _g_ := getg() 1286 1287 if _g_ != _g_.m.g0 { 1288 throw("bad runtime·mstart") 1289 } 1290 1291 // Record the caller for use as the top of stack in mcall and 1292 // for terminating the thread. 1293 // We're never coming back to mstart1 after we call schedule, 1294 // so other calls can reuse the current frame. 1295 save(getcallerpc(), getcallersp()) 1296 asminit() 1297 minit() 1298 1299 // Install signal handlers; after minit so that minit can 1300 // prepare the thread to be able to handle the signals. 1301 if _g_.m == &m0 { 1302 mstartm0() 1303 } 1304 1305 if fn := _g_.m.mstartfn; fn != nil { 1306 fn() 1307 } 1308 1309 if _g_.m != &m0 { 1310 acquirep(_g_.m.nextp.ptr()) 1311 _g_.m.nextp = 0 1312 } 1313 schedule() 1314 } 1315 1316 // mstartm0 implements part of mstart1 that only runs on the m0. 1317 // 1318 // Write barriers are allowed here because we know the GC can't be 1319 // running yet, so they'll be no-ops. 1320 // 1321 //go:yeswritebarrierrec 1322 func mstartm0() { 1323 // Create an extra M for callbacks on threads not created by Go. 1324 // An extra M is also needed on Windows for callbacks created by 1325 // syscall.NewCallback. See issue #6751 for details. 1326 if (iscgo || GOOS == "windows") && !cgoHasExtraM { 1327 cgoHasExtraM = true 1328 newextram() 1329 } 1330 initsig(false) 1331 } 1332 1333 // mPark causes a thread to park itself - temporarily waking for 1334 // fixups but otherwise waiting to be fully woken. This is the 1335 // only way that m's should park themselves. 
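// Editorial sketch (not part of the original proc.go): mstart above estimates g0's stack
// bounds by taking the address of a local variable (&size) when the OS did not report
// them. The same trick can be demonstrated in user code; the value is only an approximate
// address somewhere in the current frame, as the comment in mstart notes.
//
//	package main
//
//	import (
//		"fmt"
//		"unsafe"
//	)
//
//	func main() {
//		var x byte
//		sp := uintptr(unsafe.Pointer(&x)) // x lives on this goroutine's stack
//		fmt.Printf("approximate stack address: %#x\n", sp)
//	}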
1336 //go:nosplit 1337 func mPark() { 1338 g := getg() 1339 for { 1340 notesleep(&g.m.park) 1341 // Note, because of signal handling by this parked m, 1342 // a preemptive mDoFixup() may actually occur via 1343 // mDoFixupAndOSYield(). (See golang.org/issue/44193) 1344 noteclear(&g.m.park) 1345 if !mDoFixup() { 1346 return 1347 } 1348 } 1349 } 1350 1351 // mexit tears down and exits the current thread. 1352 // 1353 // Don't call this directly to exit the thread, since it must run at 1354 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to 1355 // unwind the stack to the point that exits the thread. 1356 // 1357 // It is entered with m.p != nil, so write barriers are allowed. It 1358 // will release the P before exiting. 1359 // 1360 //go:yeswritebarrierrec 1361 func mexit(osStack bool) { 1362 g := getg() 1363 m := g.m 1364 1365 if m == &m0 { 1366 // This is the main thread. Just wedge it. 1367 // 1368 // On Linux, exiting the main thread puts the process 1369 // into a non-waitable zombie state. On Plan 9, 1370 // exiting the main thread unblocks wait even though 1371 // other threads are still running. On Solaris we can 1372 // neither exitThread nor return from mstart. Other 1373 // bad things probably happen on other platforms. 1374 // 1375 // We could try to clean up this M more before wedging 1376 // it, but that complicates signal handling. 1377 handoffp(releasep()) 1378 lock(&sched.lock) 1379 sched.nmfreed++ 1380 checkdead() 1381 unlock(&sched.lock) 1382 mPark() 1383 throw("locked m0 woke up") 1384 } 1385 1386 sigblock(true) 1387 unminit() 1388 1389 // Free the gsignal stack. 1390 if m.gsignal != nil { 1391 stackfree(m.gsignal.stack) 1392 // On some platforms, when calling into VDSO (e.g. nanotime) 1393 // we store our g on the gsignal stack, if there is one. 1394 // Now the stack is freed, unlink it from the m, so we 1395 // won't write to it when calling VDSO code. 1396 m.gsignal = nil 1397 } 1398 1399 // Remove m from allm. 1400 lock(&sched.lock) 1401 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { 1402 if *pprev == m { 1403 *pprev = m.alllink 1404 goto found 1405 } 1406 } 1407 throw("m not found in allm") 1408 found: 1409 if !osStack { 1410 // Delay reaping m until it's done with the stack. 1411 // 1412 // If this is using an OS stack, the OS will free it 1413 // so there's no need for reaping. 1414 atomic.Store(&m.freeWait, 1) 1415 // Put m on the free list, though it will not be reaped until 1416 // freeWait is 0. Note that the free list must not be linked 1417 // through alllink because some functions walk allm without 1418 // locking, so may be using alllink. 1419 m.freelink = sched.freem 1420 sched.freem = m 1421 } 1422 unlock(&sched.lock) 1423 1424 // Release the P. 1425 handoffp(releasep()) 1426 // After this point we must not have write barriers. 1427 1428 // Invoke the deadlock detector. This must happen after 1429 // handoffp because it may have started a new M to take our 1430 // P's work. 1431 lock(&sched.lock) 1432 sched.nmfreed++ 1433 checkdead() 1434 unlock(&sched.lock) 1435 1436 if GOOS == "darwin" || GOOS == "ios" { 1437 // Make sure pendingPreemptSignals is correct when an M exits. 1438 // For #41702. 1439 if atomic.Load(&m.signalPending) != 0 { 1440 atomic.Xadd(&pendingPreemptSignals, -1) 1441 } 1442 } 1443 1444 // Destroy all allocated resources. After this is called, we may no 1445 // longer take any locks. 
1446 mdestroy(m) 1447 1448 if osStack { 1449 // Return from mstart and let the system thread 1450 // library free the g0 stack and terminate the thread. 1451 return 1452 } 1453 1454 // mstart is the thread's entry point, so there's nothing to 1455 // return to. Exit the thread directly. exitThread will clear 1456 // m.freeWait when it's done with the stack and the m can be 1457 // reaped. 1458 exitThread(&m.freeWait) 1459 } 1460 1461 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1462 // If a P is currently executing code, this will bring the P to a GC 1463 // safe point and execute fn on that P. If the P is not executing code 1464 // (it is idle or in a syscall), this will call fn(p) directly while 1465 // preventing the P from exiting its state. This does not ensure that 1466 // fn will run on every CPU executing Go code, but it acts as a global 1467 // memory barrier. GC uses this as a "ragged barrier." 1468 // 1469 // The caller must hold worldsema. 1470 // 1471 //go:systemstack 1472 func forEachP(fn func(*p)) { 1473 mp := acquirem() 1474 _p_ := getg().m.p.ptr() 1475 1476 lock(&sched.lock) 1477 if sched.safePointWait != 0 { 1478 throw("forEachP: sched.safePointWait != 0") 1479 } 1480 sched.safePointWait = gomaxprocs - 1 1481 sched.safePointFn = fn 1482 1483 // Ask all Ps to run the safe point function. 1484 for _, p := range allp { 1485 if p != _p_ { 1486 atomic.Store(&p.runSafePointFn, 1) 1487 } 1488 } 1489 preemptall() 1490 1491 // Any P entering _Pidle or _Psyscall from now on will observe 1492 // p.runSafePointFn == 1 and will call runSafePointFn when 1493 // changing its status to _Pidle/_Psyscall. 1494 1495 // Run safe point function for all idle Ps. sched.pidle will 1496 // not change because we hold sched.lock. 1497 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1498 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1499 fn(p) 1500 sched.safePointWait-- 1501 } 1502 } 1503 1504 wait := sched.safePointWait > 0 1505 unlock(&sched.lock) 1506 1507 // Run fn for the current P. 1508 fn(_p_) 1509 1510 // Force Ps currently in _Psyscall into _Pidle and hand them 1511 // off to induce safe point function execution. 1512 for _, p := range allp { 1513 s := p.status 1514 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1515 if trace.enabled { 1516 traceGoSysBlock(p) 1517 traceProcStop(p) 1518 } 1519 p.syscalltick++ 1520 handoffp(p) 1521 } 1522 } 1523 1524 // Wait for remaining Ps to run fn. 1525 if wait { 1526 for { 1527 // Wait for 100us, then try to re-preempt in 1528 // case of any races. 1529 // 1530 // Requires system stack. 1531 if notetsleep(&sched.safePointNote, 100*1000) { 1532 noteclear(&sched.safePointNote) 1533 break 1534 } 1535 preemptall() 1536 } 1537 } 1538 if sched.safePointWait != 0 { 1539 throw("forEachP: not done") 1540 } 1541 for _, p := range allp { 1542 if p.runSafePointFn != 0 { 1543 throw("forEachP: P did not run fn") 1544 } 1545 } 1546 1547 lock(&sched.lock) 1548 sched.safePointFn = nil 1549 unlock(&sched.lock) 1550 releasem(mp) 1551 } 1552 1553 // syscall_runtime_doAllThreadsSyscall serializes Go execution and 1554 // executes a specified fn() call on all m's. 1555 // 1556 // The boolean argument to fn() indicates whether the function's 1557 // return value will be consulted or not. That is, fn(true) should 1558 // return true if fn() succeeds, and fn(true) should return false if 1559 // it failed. When fn(false) is called, its return status will be 1560 // ignored. 
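// Editorial sketch (not part of the original proc.go): forEachP above implements a "ragged
// barrier": each P runs fn at its next safe point, and the coordinator runs fn on behalf of
// idle Ps, with an atomic CAS on runSafePointFn resolving the race over who runs it. A toy
// user-level analogue of that flag-and-CAS pattern (not the runtime mechanism itself):
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//		"sync/atomic"
//	)
//
//	type worker struct {
//		runFn uint32 // 1 => please run fn at the next safe point
//	}
//
//	func main() {
//		fn := func() { fmt.Println("safe-point function ran") }
//		ws := []*worker{new(worker), new(worker), new(worker)}
//		var wg sync.WaitGroup
//		for _, w := range ws {
//			atomic.StoreUint32(&w.runFn, 1) // ask every worker to run fn
//			wg.Add(1)
//			go func(w *worker) { // the worker checks its flag at its next "safe point"
//				defer wg.Done()
//				if atomic.CompareAndSwapUint32(&w.runFn, 1, 0) {
//					fn() // the CAS decides whether the worker or a coordinator runs fn
//				}
//			}(w)
//		}
//		wg.Wait()
//	}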
1561 // 1562 // syscall_runtime_doAllThreadsSyscall first invokes fn(true) on a 1563 // single, coordinating, m, and only if it returns true does it go on 1564 // to invoke fn(false) on all of the other m's known to the process. 1565 // 1566 //go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall 1567 func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) { 1568 if iscgo { 1569 panic("doAllThreadsSyscall not supported with cgo enabled") 1570 } 1571 if fn == nil { 1572 return 1573 } 1574 for atomic.Load(&sched.sysmonStarting) != 0 { 1575 osyield() 1576 } 1577 1578 // We don't want this thread to handle signals for the 1579 // duration of this critical section. The underlying issue 1580 // being that this locked coordinating m is the one monitoring 1581 // for fn() execution by all the other m's of the runtime, 1582 // while no regular go code execution is permitted (the world 1583 // is stopped). If this present m were to get distracted to 1584 // run signal handling code, and find itself waiting for a 1585 // second thread to execute go code before being able to 1586 // return from that signal handling, a deadlock will result. 1587 // (See golang.org/issue/44193.) 1588 lockOSThread() 1589 var sigmask sigset 1590 sigsave(&sigmask) 1591 sigblock(false) 1592 1593 stopTheWorldGC("doAllThreadsSyscall") 1594 if atomic.Load(&newmHandoff.haveTemplateThread) != 0 { 1595 // Ensure that there are no in-flight thread 1596 // creations: don't want to race with allm. 1597 lock(&newmHandoff.lock) 1598 for !newmHandoff.waiting { 1599 unlock(&newmHandoff.lock) 1600 osyield() 1601 lock(&newmHandoff.lock) 1602 } 1603 unlock(&newmHandoff.lock) 1604 } 1605 if netpollinited() { 1606 netpollBreak() 1607 } 1608 sigRecvPrepareForFixup() 1609 _g_ := getg() 1610 if raceenabled { 1611 // For m's running without racectx, we loan out the 1612 // racectx of this call. 1613 lock(&mFixupRace.lock) 1614 mFixupRace.ctx = _g_.racectx 1615 unlock(&mFixupRace.lock) 1616 } 1617 if ok := fn(true); ok { 1618 tid := _g_.m.procid 1619 for mp := allm; mp != nil; mp = mp.alllink { 1620 if mp.procid == tid { 1621 // This m has already completed fn() 1622 // call. 1623 continue 1624 } 1625 // Be wary of mp's without procid values if 1626 // they are known not to park. If they are 1627 // marked as parking with a zero procid, then 1628 // they will be racing with this code to be 1629 // allocated a procid and we will annotate 1630 // them with the need to execute the fn when 1631 // they acquire a procid to run it. 1632 if mp.procid == 0 && !mp.doesPark { 1633 // Reaching here, we are either 1634 // running Windows, or cgo linked 1635 // code. Neither of which are 1636 // currently supported by this API. 1637 throw("unsupported runtime environment") 1638 } 1639 // stopTheWorldGC() doesn't guarantee stopping 1640 // all the threads, so we lock here to avoid 1641 // the possibility of racing with mp. 1642 lock(&mp.mFixup.lock) 1643 mp.mFixup.fn = fn 1644 atomic.Store(&mp.mFixup.used, 1) 1645 if mp.doesPark { 1646 // For non-service threads this will 1647 // cause the wakeup to be short lived 1648 // (once the mutex is unlocked). The 1649 // next real wakeup will occur after 1650 // startTheWorldGC() is called. 
1651 notewakeup(&mp.park) 1652 } 1653 unlock(&mp.mFixup.lock) 1654 } 1655 for { 1656 done := true 1657 for mp := allm; done && mp != nil; mp = mp.alllink { 1658 if mp.procid == tid { 1659 continue 1660 } 1661 done = atomic.Load(&mp.mFixup.used) == 0 1662 } 1663 if done { 1664 break 1665 } 1666 // if needed force sysmon and/or newmHandoff to wakeup. 1667 lock(&sched.lock) 1668 if atomic.Load(&sched.sysmonwait) != 0 { 1669 atomic.Store(&sched.sysmonwait, 0) 1670 notewakeup(&sched.sysmonnote) 1671 } 1672 unlock(&sched.lock) 1673 lock(&newmHandoff.lock) 1674 if newmHandoff.waiting { 1675 newmHandoff.waiting = false 1676 notewakeup(&newmHandoff.wake) 1677 } 1678 unlock(&newmHandoff.lock) 1679 osyield() 1680 } 1681 } 1682 if raceenabled { 1683 lock(&mFixupRace.lock) 1684 mFixupRace.ctx = 0 1685 unlock(&mFixupRace.lock) 1686 } 1687 startTheWorldGC() 1688 msigrestore(sigmask) 1689 unlockOSThread() 1690 } 1691 1692 // runSafePointFn runs the safe point function, if any, for this P. 1693 // This should be called like 1694 // 1695 // if getg().m.p.runSafePointFn != 0 { 1696 // runSafePointFn() 1697 // } 1698 // 1699 // runSafePointFn must be checked on any transition in to _Pidle or 1700 // _Psyscall to avoid a race where forEachP sees that the P is running 1701 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1702 // nor the P run the safe-point function. 1703 func runSafePointFn() { 1704 p := getg().m.p.ptr() 1705 // Resolve the race between forEachP running the safe-point 1706 // function on this P's behalf and this P running the 1707 // safe-point function directly. 1708 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1709 return 1710 } 1711 sched.safePointFn(p) 1712 lock(&sched.lock) 1713 sched.safePointWait-- 1714 if sched.safePointWait == 0 { 1715 notewakeup(&sched.safePointNote) 1716 } 1717 unlock(&sched.lock) 1718 } 1719 1720 // When running with cgo, we call _cgo_thread_start 1721 // to start threads for us so that we can play nicely with 1722 // foreign code. 1723 var cgoThreadStart unsafe.Pointer 1724 1725 type cgothreadstart struct { 1726 g guintptr 1727 tls *uint64 1728 fn unsafe.Pointer 1729 } 1730 1731 // Allocate a new m unassociated with any thread. 1732 // Can use p for allocation context if needed. 1733 // fn is recorded as the new m's m.mstartfn. 1734 // id is optional pre-allocated m ID. Omit by passing -1. 1735 // 1736 // This function is allowed to have write barriers even if the caller 1737 // isn't because it borrows _p_. 1738 // 1739 //go:yeswritebarrierrec 1740 func allocm(_p_ *p, fn func(), id int64) *m { 1741 _g_ := getg() 1742 acquirem() // disable GC because it can be called from sysmon 1743 if _g_.m.p == 0 { 1744 acquirep(_p_) // temporarily borrow p for mallocs in this function 1745 } 1746 1747 // Release the free M list. We need to do this somewhere and 1748 // this may free up a stack we can use. 1749 if sched.freem != nil { 1750 lock(&sched.lock) 1751 var newList *m 1752 for freem := sched.freem; freem != nil; { 1753 if freem.freeWait != 0 { 1754 next := freem.freelink 1755 freem.freelink = newList 1756 newList = freem 1757 freem = next 1758 continue 1759 } 1760 // stackfree must be on the system stack, but allocm is 1761 // reachable off the system stack transitively from 1762 // startm. 
1763 systemstack(func() { 1764 stackfree(freem.g0.stack) 1765 }) 1766 freem = freem.freelink 1767 } 1768 sched.freem = newList 1769 unlock(&sched.lock) 1770 } 1771 1772 mp := new(m) 1773 mp.mstartfn = fn 1774 mcommoninit(mp, id) 1775 1776 // In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack. 1777 // Windows and Plan 9 will layout sched stack on OS stack. 1778 if iscgo || mStackIsSystemAllocated() { 1779 mp.g0 = malg(-1) 1780 } else { 1781 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1782 } 1783 mp.g0.m = mp 1784 1785 if _p_ == _g_.m.p.ptr() { 1786 releasep() 1787 } 1788 releasem(_g_.m) 1789 1790 return mp 1791 } 1792 1793 // needm is called when a cgo callback happens on a 1794 // thread without an m (a thread not created by Go). 1795 // In this case, needm is expected to find an m to use 1796 // and return with m, g initialized correctly. 1797 // Since m and g are not set now (likely nil, but see below) 1798 // needm is limited in what routines it can call. In particular 1799 // it can only call nosplit functions (textflag 7) and cannot 1800 // do any scheduling that requires an m. 1801 // 1802 // In order to avoid needing heavy lifting here, we adopt 1803 // the following strategy: there is a stack of available m's 1804 // that can be stolen. Using compare-and-swap 1805 // to pop from the stack has ABA races, so we simulate 1806 // a lock by doing an exchange (via Casuintptr) to steal the stack 1807 // head and replace the top pointer with MLOCKED (1). 1808 // This serves as a simple spin lock that we can use even 1809 // without an m. The thread that locks the stack in this way 1810 // unlocks the stack by storing a valid stack head pointer. 1811 // 1812 // In order to make sure that there is always an m structure 1813 // available to be stolen, we maintain the invariant that there 1814 // is always one more than needed. At the beginning of the 1815 // program (if cgo is in use) the list is seeded with a single m. 1816 // If needm finds that it has taken the last m off the list, its job 1817 // is - once it has installed its own m so that it can do things like 1818 // allocate memory - to create a spare m and put it on the list. 1819 // 1820 // Each of these extra m's also has a g0 and a curg that are 1821 // pressed into service as the scheduling stack and current 1822 // goroutine for the duration of the cgo callback. 1823 // 1824 // When the callback is done with the m, it calls dropm to 1825 // put the m back on the list. 1826 //go:nosplit 1827 func needm() { 1828 if (iscgo || GOOS == "windows") && !cgoHasExtraM { 1829 // Can happen if C/C++ code calls Go from a global ctor. 1830 // Can also happen on Windows if a global ctor uses a 1831 // callback created by syscall.NewCallback. See issue #6751 1832 // for details. 1833 // 1834 // Can not throw, because scheduler is not initialized yet. 1835 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1836 exit(1) 1837 } 1838 1839 // Save and block signals before getting an M. 1840 // The signal handler may call needm itself, 1841 // and we must avoid a deadlock. Also, once g is installed, 1842 // any incoming signals will try to execute, 1843 // but we won't have the sigaltstack settings and other data 1844 // set up appropriately until the end of minit, which will 1845 // unblock the signals. This is the same dance as when 1846 // starting a new m to run Go code via newosproc. 
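	//
	// (A rough sketch of how this path is reached, with control flow
	// simplified: a thread that Go did not create calls an exported Go
	// function, e.g.
	//
	//	//export Callback
	//	func Callback() { ... }
	//
	// The runtime's cgocallback entry code, reached from the cgo-generated
	// stub, finds no g in thread-local storage and calls needm; needm pops
	// an m off the extra list below, installs m.g0 as g, and runs minit;
	// cgocallbackg then runs the Go callback on m.curg; and dropm returns
	// the m to the extra list once the callback is done.)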
1847 var sigmask sigset 1848 sigsave(&sigmask) 1849 sigblock(false) 1850 1851 // Lock extra list, take head, unlock popped list. 1852 // nilokay=false is safe here because of the invariant above, 1853 // that the extra list always contains or will soon contain 1854 // at least one m. 1855 mp := lockextra(false) 1856 1857 // Set needextram when we've just emptied the list, 1858 // so that the eventual call into cgocallbackg will 1859 // allocate a new m for the extra list. We delay the 1860 // allocation until then so that it can be done 1861 // after exitsyscall makes sure it is okay to be 1862 // running at all (that is, there's no garbage collection 1863 // running right now). 1864 mp.needextram = mp.schedlink == 0 1865 extraMCount-- 1866 unlockextra(mp.schedlink.ptr()) 1867 1868 // Store the original signal mask for use by minit. 1869 mp.sigmask = sigmask 1870 1871 // Install g (= m->g0) and set the stack bounds 1872 // to match the current stack. We don't actually know 1873 // how big the stack is, like we don't know how big any 1874 // scheduling stack is, but we assume there's at least 32 kB, 1875 // which is more than enough for us. 1876 setg(mp.g0) 1877 _g_ := getg() 1878 _g_.stack.hi = getcallersp() + 1024 1879 _g_.stack.lo = getcallersp() - 32*1024 1880 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1881 1882 // Initialize this thread to use the m. 1883 asminit() 1884 minit() 1885 1886 // mp.curg is now a real goroutine. 1887 casgstatus(mp.curg, _Gdead, _Gsyscall) 1888 atomic.Xadd(&sched.ngsys, -1) 1889 } 1890 1891 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1892 1893 // newextram allocates m's and puts them on the extra list. 1894 // It is called with a working local m, so that it can do things 1895 // like call schedlock and allocate. 1896 func newextram() { 1897 c := atomic.Xchg(&extraMWaiters, 0) 1898 if c > 0 { 1899 for i := uint32(0); i < c; i++ { 1900 oneNewExtraM() 1901 } 1902 } else { 1903 // Make sure there is at least one extra M. 1904 mp := lockextra(true) 1905 unlockextra(mp) 1906 if mp == nil { 1907 oneNewExtraM() 1908 } 1909 } 1910 } 1911 1912 // oneNewExtraM allocates an m and puts it on the extra list. 1913 func oneNewExtraM() { 1914 // Create extra goroutine locked to extra m. 1915 // The goroutine is the context in which the cgo callback will run. 1916 // The sched.pc will never be returned to, but setting it to 1917 // goexit makes clear to the traceback routines where 1918 // the goroutine stack ends. 1919 mp := allocm(nil, nil, -1) 1920 gp := malg(4096) 1921 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1922 gp.sched.sp = gp.stack.hi 1923 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1924 gp.sched.lr = 0 1925 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1926 gp.syscallpc = gp.sched.pc 1927 gp.syscallsp = gp.sched.sp 1928 gp.stktopsp = gp.sched.sp 1929 // malg returns status as _Gidle. Change to _Gdead before 1930 // adding to allg where GC can see it. We use _Gdead to hide 1931 // this from tracebacks and stack scans since it isn't a 1932 // "real" goroutine until needm grabs it. 
1933 casgstatus(gp, _Gidle, _Gdead) 1934 gp.m = mp 1935 mp.curg = gp 1936 mp.lockedInt++ 1937 mp.lockedg.set(gp) 1938 gp.lockedm.set(mp) 1939 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1940 if raceenabled { 1941 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum) 1942 } 1943 // put on allg for garbage collector 1944 allgadd(gp) 1945 1946 // gp is now on the allg list, but we don't want it to be 1947 // counted by gcount. It would be more "proper" to increment 1948 // sched.ngfree, but that requires locking. Incrementing ngsys 1949 // has the same effect. 1950 atomic.Xadd(&sched.ngsys, +1) 1951 1952 // Add m to the extra list. 1953 mnext := lockextra(true) 1954 mp.schedlink.set(mnext) 1955 extraMCount++ 1956 unlockextra(mp) 1957 } 1958 1959 // dropm is called when a cgo callback has called needm but is now 1960 // done with the callback and returning back into the non-Go thread. 1961 // It puts the current m back onto the extra list. 1962 // 1963 // The main expense here is the call to signalstack to release the 1964 // m's signal stack, and then the call to needm on the next callback 1965 // from this thread. It is tempting to try to save the m for next time, 1966 // which would eliminate both these costs, but there might not be 1967 // a next time: the current thread (which Go does not control) might exit. 1968 // If we saved the m for that thread, there would be an m leak each time 1969 // such a thread exited. Instead, we acquire and release an m on each 1970 // call. These should typically not be scheduling operations, just a few 1971 // atomics, so the cost should be small. 1972 // 1973 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1974 // variable using pthread_key_create. Unlike the pthread keys we already use 1975 // on OS X, this dummy key would never be read by Go code. It would exist 1976 // only so that we could register at thread-exit-time destructor. 1977 // That destructor would put the m back onto the extra list. 1978 // This is purely a performance optimization. The current version, 1979 // in which dropm happens on each cgo call, is still correct too. 1980 // We may have to keep the current version on systems with cgo 1981 // but without pthreads, like Windows. 1982 func dropm() { 1983 // Clear m and g, and return m to the extra list. 1984 // After the call to setg we can only call nosplit functions 1985 // with no pointer manipulation. 1986 mp := getg().m 1987 1988 // Return mp.curg to dead state. 1989 casgstatus(mp.curg, _Gsyscall, _Gdead) 1990 mp.curg.preemptStop = false 1991 atomic.Xadd(&sched.ngsys, +1) 1992 1993 // Block signals before unminit. 1994 // Unminit unregisters the signal handling stack (but needs g on some systems). 1995 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1996 // It's important not to try to handle a signal between those two steps. 1997 sigmask := mp.sigmask 1998 sigblock(false) 1999 unminit() 2000 2001 mnext := lockextra(true) 2002 extraMCount++ 2003 mp.schedlink.set(mnext) 2004 2005 setg(nil) 2006 2007 // Commit the release of mp. 2008 unlockextra(mp) 2009 2010 msigrestore(sigmask) 2011 } 2012 2013 // A helper function for EnsureDropM. 2014 func getm() uintptr { 2015 return uintptr(unsafe.Pointer(getg().m)) 2016 } 2017 2018 var extram uintptr 2019 var extraMCount uint32 // Protected by lockextra 2020 var extraMWaiters uint32 2021 2022 // lockextra locks the extra list and returns the list head. 
2023 // The caller must unlock the list by storing a new list head 2024 // to extram. If nilokay is true, then lockextra will 2025 // return a nil list head if that's what it finds. If nilokay is false, 2026 // lockextra will keep waiting until the list head is no longer nil. 2027 //go:nosplit 2028 func lockextra(nilokay bool) *m { 2029 const locked = 1 2030 2031 incr := false 2032 for { 2033 old := atomic.Loaduintptr(&extram) 2034 if old == locked { 2035 osyield() 2036 continue 2037 } 2038 if old == 0 && !nilokay { 2039 if !incr { 2040 // Add 1 to the number of threads 2041 // waiting for an M. 2042 // This is cleared by newextram. 2043 atomic.Xadd(&extraMWaiters, 1) 2044 incr = true 2045 } 2046 usleep(1) 2047 continue 2048 } 2049 if atomic.Casuintptr(&extram, old, locked) { 2050 return (*m)(unsafe.Pointer(old)) 2051 } 2052 osyield() 2053 continue 2054 } 2055 } 2056 2057 //go:nosplit 2058 func unlockextra(mp *m) { 2059 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 2060 } 2061 2062 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 2063 // around exec'ing while creating/destroying threads. See issue #19546. 2064 var execLock rwmutex 2065 2066 // newmHandoff contains a list of m structures that need new OS threads. 2067 // This is used by newm in situations where newm itself can't safely 2068 // start an OS thread. 2069 var newmHandoff struct { 2070 lock mutex 2071 2072 // newm points to a list of M structures that need new OS 2073 // threads. The list is linked through m.schedlink. 2074 newm muintptr 2075 2076 // waiting indicates that wake needs to be notified when an m 2077 // is put on the list. 2078 waiting bool 2079 wake note 2080 2081 // haveTemplateThread indicates that the templateThread has 2082 // been started. This is not protected by lock. Use cas to set 2083 // to 1. 2084 haveTemplateThread uint32 2085 } 2086 2087 // Create a new m. It will start off with a call to fn, or else the scheduler. 2088 // fn needs to be static and not a heap allocated closure. 2089 // May run with m.p==nil, so write barriers are not allowed. 2090 // 2091 // id is optional pre-allocated m ID. Omit by passing -1. 2092 //go:nowritebarrierrec 2093 func newm(fn func(), _p_ *p, id int64) { 2094 mp := allocm(_p_, fn, id) 2095 mp.doesPark = (_p_ != nil) 2096 mp.nextp.set(_p_) 2097 mp.sigmask = initSigmask 2098 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { 2099 // We're on a locked M or a thread that may have been 2100 // started by C. The kernel state of this thread may 2101 // be strange (the user may have locked it for that 2102 // purpose). We don't want to clone that into another 2103 // thread. Instead, ask a known-good thread to create 2104 // the thread for us. 2105 // 2106 // This is disabled on Plan 9. See golang.org/issue/22227. 2107 // 2108 // TODO: This may be unnecessary on Windows, which 2109 // doesn't model thread creation off fork. 
2110 lock(&newmHandoff.lock) 2111 if newmHandoff.haveTemplateThread == 0 { 2112 throw("on a locked thread with no template thread") 2113 } 2114 mp.schedlink = newmHandoff.newm 2115 newmHandoff.newm.set(mp) 2116 if newmHandoff.waiting { 2117 newmHandoff.waiting = false 2118 notewakeup(&newmHandoff.wake) 2119 } 2120 unlock(&newmHandoff.lock) 2121 return 2122 } 2123 newm1(mp) 2124 } 2125 2126 func newm1(mp *m) { 2127 if iscgo { 2128 var ts cgothreadstart 2129 if _cgo_thread_start == nil { 2130 throw("_cgo_thread_start missing") 2131 } 2132 ts.g.set(mp.g0) 2133 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 2134 ts.fn = unsafe.Pointer(funcPC(mstart)) 2135 if msanenabled { 2136 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 2137 } 2138 execLock.rlock() // Prevent process clone. 2139 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 2140 execLock.runlock() 2141 return 2142 } 2143 execLock.rlock() // Prevent process clone. 2144 newosproc(mp) 2145 execLock.runlock() 2146 } 2147 2148 // startTemplateThread starts the template thread if it is not already 2149 // running. 2150 // 2151 // The calling thread must itself be in a known-good state. 2152 func startTemplateThread() { 2153 if GOARCH == "wasm" { // no threads on wasm yet 2154 return 2155 } 2156 2157 // Disable preemption to guarantee that the template thread will be 2158 // created before a park once haveTemplateThread is set. 2159 mp := acquirem() 2160 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { 2161 releasem(mp) 2162 return 2163 } 2164 newm(templateThread, nil, -1) 2165 releasem(mp) 2166 } 2167 2168 // mFixupRace is used to temporarily borrow the race context from the 2169 // coordinating m during a syscall_runtime_doAllThreadsSyscall and 2170 // loan it out to each of the m's of the runtime so they can execute a 2171 // mFixup.fn in that context. 2172 var mFixupRace struct { 2173 lock mutex 2174 ctx uintptr 2175 } 2176 2177 // mDoFixup runs any outstanding fixup function for the running m. 2178 // Returns true if a fixup was outstanding and actually executed. 2179 // 2180 // Note: to avoid deadlocks, and the need for the fixup function 2181 // itself to be async safe, signals are blocked for the working m 2182 // while it holds the mFixup lock. (See golang.org/issue/44193) 2183 // 2184 //go:nosplit 2185 func mDoFixup() bool { 2186 _g_ := getg() 2187 if used := atomic.Load(&_g_.m.mFixup.used); used == 0 { 2188 return false 2189 } 2190 2191 // slow path - if fixup fn is used, block signals and lock. 2192 var sigmask sigset 2193 sigsave(&sigmask) 2194 sigblock(false) 2195 lock(&_g_.m.mFixup.lock) 2196 fn := _g_.m.mFixup.fn 2197 if fn != nil { 2198 if gcphase != _GCoff { 2199 // We can't have a write barrier in this 2200 // context since we may not have a P, but we 2201 // clear fn to signal that we've executed the 2202 // fixup. As long as fn is kept alive 2203 // elsewhere, technically we should have no 2204 // issues with the GC, but fn is likely 2205 // generated in a different package altogether 2206 // that may change independently. Just assert 2207 // the GC is off so this lack of write barrier 2208 // is more obviously safe. 2209 throw("GC must be disabled to protect validity of fn value") 2210 } 2211 if _g_.racectx != 0 || !raceenabled { 2212 fn(false) 2213 } else { 2214 // temporarily acquire the context of the 2215 // originator of the 2216 // syscall_runtime_doAllThreadsSyscall and 2217 // block others from using it for the duration 2218 // of the fixup call. 
2219 lock(&mFixupRace.lock) 2220 _g_.racectx = mFixupRace.ctx 2221 fn(false) 2222 _g_.racectx = 0 2223 unlock(&mFixupRace.lock) 2224 } 2225 *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0 2226 atomic.Store(&_g_.m.mFixup.used, 0) 2227 } 2228 unlock(&_g_.m.mFixup.lock) 2229 msigrestore(sigmask) 2230 return fn != nil 2231 } 2232 2233 // mDoFixupAndOSYield is called when an m is unable to send a signal 2234 // because the allThreadsSyscall mechanism is in progress. That is, an 2235 // mPark() has been interrupted with this signal handler so we need to 2236 // ensure the fixup is executed from this context. 2237 //go:nosplit 2238 func mDoFixupAndOSYield() { 2239 mDoFixup() 2240 osyield() 2241 } 2242 2243 // templateThread is a thread in a known-good state that exists solely 2244 // to start new threads in known-good states when the calling thread 2245 // may not be in a good state. 2246 // 2247 // Many programs never need this, so templateThread is started lazily 2248 // when we first enter a state that might lead to running on a thread 2249 // in an unknown state. 2250 // 2251 // templateThread runs on an M without a P, so it must not have write 2252 // barriers. 2253 // 2254 //go:nowritebarrierrec 2255 func templateThread() { 2256 lock(&sched.lock) 2257 sched.nmsys++ 2258 checkdead() 2259 unlock(&sched.lock) 2260 2261 for { 2262 lock(&newmHandoff.lock) 2263 for newmHandoff.newm != 0 { 2264 newm := newmHandoff.newm.ptr() 2265 newmHandoff.newm = 0 2266 unlock(&newmHandoff.lock) 2267 for newm != nil { 2268 next := newm.schedlink.ptr() 2269 newm.schedlink = 0 2270 newm1(newm) 2271 newm = next 2272 } 2273 lock(&newmHandoff.lock) 2274 } 2275 newmHandoff.waiting = true 2276 noteclear(&newmHandoff.wake) 2277 unlock(&newmHandoff.lock) 2278 notesleep(&newmHandoff.wake) 2279 mDoFixup() 2280 } 2281 } 2282 2283 // Stops execution of the current m until new work is available. 2284 // Returns with acquired P. 2285 func stopm() { 2286 _g_ := getg() 2287 2288 if _g_.m.locks != 0 { 2289 throw("stopm holding locks") 2290 } 2291 if _g_.m.p != 0 { 2292 throw("stopm holding p") 2293 } 2294 if _g_.m.spinning { 2295 throw("stopm spinning") 2296 } 2297 2298 lock(&sched.lock) 2299 mput(_g_.m) 2300 unlock(&sched.lock) 2301 mPark() 2302 acquirep(_g_.m.nextp.ptr()) 2303 _g_.m.nextp = 0 2304 } 2305 2306 func mspinning() { 2307 // startm's caller incremented nmspinning. Set the new M's spinning. 2308 getg().m.spinning = true 2309 } 2310 2311 // Schedules some M to run the p (creates an M if necessary). 2312 // If p==nil, tries to get an idle P, if no idle P's does nothing. 2313 // May run with m.p==nil, so write barriers are not allowed. 2314 // If spinning is set, the caller has incremented nmspinning and startm will 2315 // either decrement nmspinning or set m.spinning in the newly started M. 2316 // 2317 // Callers passing a non-nil P must call from a non-preemptible context. See 2318 // comment on acquirem below. 2319 // 2320 // Must not have write barriers because this may be called without a P. 2321 //go:nowritebarrierrec 2322 func startm(_p_ *p, spinning bool) { 2323 // Disable preemption. 2324 // 2325 // Every owned P must have an owner that will eventually stop it in the 2326 // event of a GC stop request. startm takes transient ownership of a P 2327 // (either from argument or pidleget below) and transfers ownership to 2328 // a started M, which will be responsible for performing the stop. 
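	//
	// (For the spinning case described in the doc comment above: wakep,
	// further down in this file, is essentially
	//
	//	if atomic.Cas(&sched.nmspinning, 0, 1) {
	//		startm(nil, true) // startm undoes the increment if no idle P is found
	//	}
	//
	// so the nmspinning increment always ends in either a matching
	// decrement or a newly spinning M.)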
2329 // 2330 // Preemption must be disabled during this transient ownership, 2331 // otherwise the P this is running on may enter GC stop while still 2332 // holding the transient P, leaving that P in limbo and deadlocking the 2333 // STW. 2334 // 2335 // Callers passing a non-nil P must already be in non-preemptible 2336 // context, otherwise such preemption could occur on function entry to 2337 // startm. Callers passing a nil P may be preemptible, so we must 2338 // disable preemption before acquiring a P from pidleget below. 2339 mp := acquirem() 2340 lock(&sched.lock) 2341 if _p_ == nil { 2342 _p_ = pidleget() 2343 if _p_ == nil { 2344 unlock(&sched.lock) 2345 if spinning { 2346 // The caller incremented nmspinning, but there are no idle Ps, 2347 // so it's okay to just undo the increment and give up. 2348 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2349 throw("startm: negative nmspinning") 2350 } 2351 } 2352 releasem(mp) 2353 return 2354 } 2355 } 2356 nmp := mget() 2357 if nmp == nil { 2358 // No M is available, we must drop sched.lock and call newm. 2359 // However, we already own a P to assign to the M. 2360 // 2361 // Once sched.lock is released, another G (e.g., in a syscall), 2362 // could find no idle P while checkdead finds a runnable G but 2363 // no running M's because this new M hasn't started yet, thus 2364 // throwing in an apparent deadlock. 2365 // 2366 // Avoid this situation by pre-allocating the ID for the new M, 2367 // thus marking it as 'running' before we drop sched.lock. This 2368 // new M will eventually run the scheduler to execute any 2369 // queued G's. 2370 id := mReserveID() 2371 unlock(&sched.lock) 2372 2373 var fn func() 2374 if spinning { 2375 // The caller incremented nmspinning, so set m.spinning in the new M. 2376 fn = mspinning 2377 } 2378 newm(fn, _p_, id) 2379 // Ownership transfer of _p_ committed by start in newm. 2380 // Preemption is now safe. 2381 releasem(mp) 2382 return 2383 } 2384 unlock(&sched.lock) 2385 if nmp.spinning { 2386 throw("startm: m is spinning") 2387 } 2388 if nmp.nextp != 0 { 2389 throw("startm: m has p") 2390 } 2391 if spinning && !runqempty(_p_) { 2392 throw("startm: p has runnable gs") 2393 } 2394 // The caller incremented nmspinning, so set m.spinning in the new M. 2395 nmp.spinning = spinning 2396 nmp.nextp.set(_p_) 2397 notewakeup(&nmp.park) 2398 // Ownership transfer of _p_ committed by wakeup. Preemption is now 2399 // safe. 2400 releasem(mp) 2401 } 2402 2403 // Hands off P from syscall or locked M. 2404 // Always runs without a P, so write barriers are not allowed. 2405 //go:nowritebarrierrec 2406 func handoffp(_p_ *p) { 2407 // handoffp must start an M in any situation where 2408 // findrunnable would return a G to run on _p_. 
2409 2410 // if it has local work, start it straight away 2411 if !runqempty(_p_) || sched.runqsize != 0 { 2412 startm(_p_, false) 2413 return 2414 } 2415 // if it has GC work, start it straight away 2416 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2417 startm(_p_, false) 2418 return 2419 } 2420 // no local work, check that there are no spinning/idle M's, 2421 // otherwise our help is not required 2422 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 2423 startm(_p_, true) 2424 return 2425 } 2426 lock(&sched.lock) 2427 if sched.gcwaiting != 0 { 2428 _p_.status = _Pgcstop 2429 sched.stopwait-- 2430 if sched.stopwait == 0 { 2431 notewakeup(&sched.stopnote) 2432 } 2433 unlock(&sched.lock) 2434 return 2435 } 2436 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 2437 sched.safePointFn(_p_) 2438 sched.safePointWait-- 2439 if sched.safePointWait == 0 { 2440 notewakeup(&sched.safePointNote) 2441 } 2442 } 2443 if sched.runqsize != 0 { 2444 unlock(&sched.lock) 2445 startm(_p_, false) 2446 return 2447 } 2448 // If this is the last running P and nobody is polling network, 2449 // need to wakeup another M to poll network. 2450 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 2451 unlock(&sched.lock) 2452 startm(_p_, false) 2453 return 2454 } 2455 2456 // The scheduler lock cannot be held when calling wakeNetPoller below 2457 // because wakeNetPoller may call wakep which may call startm. 2458 when := nobarrierWakeTime(_p_) 2459 pidleput(_p_) 2460 unlock(&sched.lock) 2461 2462 if when != 0 { 2463 wakeNetPoller(when) 2464 } 2465 } 2466 2467 // Tries to add one more P to execute G's. 2468 // Called when a G is made runnable (newproc, ready). 2469 func wakep() { 2470 if atomic.Load(&sched.npidle) == 0 { 2471 return 2472 } 2473 // be conservative about spinning threads 2474 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) { 2475 return 2476 } 2477 startm(nil, true) 2478 } 2479 2480 // Stops execution of the current m that is locked to a g until the g is runnable again. 2481 // Returns with acquired P. 2482 func stoplockedm() { 2483 _g_ := getg() 2484 2485 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 2486 throw("stoplockedm: inconsistent locking") 2487 } 2488 if _g_.m.p != 0 { 2489 // Schedule another M to run this p. 2490 _p_ := releasep() 2491 handoffp(_p_) 2492 } 2493 incidlelocked(1) 2494 // Wait until another thread schedules lockedg again. 2495 mPark() 2496 status := readgstatus(_g_.m.lockedg.ptr()) 2497 if status&^_Gscan != _Grunnable { 2498 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n") 2499 dumpgstatus(_g_.m.lockedg.ptr()) 2500 throw("stoplockedm: not runnable") 2501 } 2502 acquirep(_g_.m.nextp.ptr()) 2503 _g_.m.nextp = 0 2504 } 2505 2506 // Schedules the locked m to run the locked gp. 2507 // May run during STW, so write barriers are not allowed. 2508 //go:nowritebarrierrec 2509 func startlockedm(gp *g) { 2510 _g_ := getg() 2511 2512 mp := gp.lockedm.ptr() 2513 if mp == _g_.m { 2514 throw("startlockedm: locked to me") 2515 } 2516 if mp.nextp != 0 { 2517 throw("startlockedm: m has p") 2518 } 2519 // directly handoff current P to the locked m 2520 incidlelocked(-1) 2521 _p_ := releasep() 2522 mp.nextp.set(_p_) 2523 notewakeup(&mp.park) 2524 stopm() 2525 } 2526 2527 // Stops the current m for stopTheWorld. 2528 // Returns when the world is restarted. 
2529 func gcstopm() { 2530 _g_ := getg() 2531 2532 if sched.gcwaiting == 0 { 2533 throw("gcstopm: not waiting for gc") 2534 } 2535 if _g_.m.spinning { 2536 _g_.m.spinning = false 2537 // OK to just drop nmspinning here, 2538 // startTheWorld will unpark threads as necessary. 2539 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2540 throw("gcstopm: negative nmspinning") 2541 } 2542 } 2543 _p_ := releasep() 2544 lock(&sched.lock) 2545 _p_.status = _Pgcstop 2546 sched.stopwait-- 2547 if sched.stopwait == 0 { 2548 notewakeup(&sched.stopnote) 2549 } 2550 unlock(&sched.lock) 2551 stopm() 2552 } 2553 2554 // Schedules gp to run on the current M. 2555 // If inheritTime is true, gp inherits the remaining time in the 2556 // current time slice. Otherwise, it starts a new time slice. 2557 // Never returns. 2558 // 2559 // Write barriers are allowed because this is called immediately after 2560 // acquiring a P in several places. 2561 // 2562 //go:yeswritebarrierrec 2563 func execute(gp *g, inheritTime bool) { 2564 _g_ := getg() 2565 2566 // Assign gp.m before entering _Grunning so running Gs have an 2567 // M. 2568 _g_.m.curg = gp 2569 gp.m = _g_.m 2570 casgstatus(gp, _Grunnable, _Grunning) 2571 gp.waitsince = 0 2572 gp.preempt = false 2573 gp.stackguard0 = gp.stack.lo + _StackGuard 2574 if !inheritTime { 2575 _g_.m.p.ptr().schedtick++ 2576 } 2577 2578 // Check whether the profiler needs to be turned on or off. 2579 hz := sched.profilehz 2580 if _g_.m.profilehz != hz { 2581 setThreadCPUProfiler(hz) 2582 } 2583 2584 if trace.enabled { 2585 // GoSysExit has to happen when we have a P, but before GoStart. 2586 // So we emit it here. 2587 if gp.syscallsp != 0 && gp.sysblocktraced { 2588 traceGoSysExit(gp.sysexitticks) 2589 } 2590 traceGoStart() 2591 } 2592 2593 gogo(&gp.sched) 2594 } 2595 2596 // Finds a runnable goroutine to execute. 2597 // Tries to steal from other P's, get g from local or global queue, poll network. 2598 func findrunnable() (gp *g, inheritTime bool) { 2599 _g_ := getg() 2600 2601 // The conditions here and in handoffp must agree: if 2602 // findrunnable would return a G to run, handoffp must start 2603 // an M. 2604 2605 top: 2606 _p_ := _g_.m.p.ptr() 2607 if sched.gcwaiting != 0 { 2608 gcstopm() 2609 goto top 2610 } 2611 if _p_.runSafePointFn != 0 { 2612 runSafePointFn() 2613 } 2614 2615 now, pollUntil, _ := checkTimers(_p_, 0) 2616 2617 if fingwait && fingwake { 2618 if gp := wakefing(); gp != nil { 2619 ready(gp, 0, true) 2620 } 2621 } 2622 if *cgo_yield != nil { 2623 asmcgocall(*cgo_yield, nil) 2624 } 2625 2626 // local runq 2627 if gp, inheritTime := runqget(_p_); gp != nil { 2628 return gp, inheritTime 2629 } 2630 2631 // global runq 2632 if sched.runqsize != 0 { 2633 lock(&sched.lock) 2634 gp := globrunqget(_p_, 0) 2635 unlock(&sched.lock) 2636 if gp != nil { 2637 return gp, false 2638 } 2639 } 2640 2641 // Poll network. 2642 // This netpoll is only an optimization before we resort to stealing. 2643 // We can safely skip it if there are no waiters or a thread is blocked 2644 // in netpoll already. If there is any kind of logical race with that 2645 // blocked thread (e.g. it has already returned from netpoll, but does 2646 // not set lastpoll yet), this thread will do blocking netpoll below 2647 // anyway. 
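	//
	// (netpoll's delay argument is in nanoseconds: 0 polls without
	// blocking, a negative value blocks until some descriptor is ready,
	// and a positive value blocks for at most that long. The call below
	// uses the non-blocking form; the blocking form, netpoll(delta),
	// appears near the bottom of this function once there is nothing
	// else to do.)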
2648 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2649 		if list := netpoll(0); !list.empty() { // non-blocking
2650 			gp := list.pop()
2651 			injectglist(&list)
2652 			casgstatus(gp, _Gwaiting, _Grunnable)
2653 			if trace.enabled {
2654 				traceGoUnpark(gp, 0)
2655 			}
2656 			return gp, false
2657 		}
2658 	}
2659 
2660 	// Steal work from other P's.
2661 	procs := uint32(gomaxprocs)
2662 	ranTimer := false
2663 	// If number of spinning M's >= number of busy P's, block.
2664 	// This is necessary to prevent excessive CPU consumption
2665 	// when GOMAXPROCS>>1 but the program parallelism is low.
2666 	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2667 		goto stop
2668 	}
2669 	if !_g_.m.spinning {
2670 		_g_.m.spinning = true
2671 		atomic.Xadd(&sched.nmspinning, 1)
2672 	}
2673 	const stealTries = 4
2674 	for i := 0; i < stealTries; i++ {
2675 		stealTimersOrRunNextG := i == stealTries-1
2676 
2677 		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2678 			if sched.gcwaiting != 0 {
2679 				goto top
2680 			}
2681 			p2 := allp[enum.position()]
2682 			if _p_ == p2 {
2683 				continue
2684 			}
2685 
2686 			// Steal timers from p2. This call to checkTimers is the only place
2687 			// where we might hold a lock on a different P's timers. We do this
2688 			// once on the last pass before checking runnext because stealing
2689 			// from the other P's runnext should be the last resort, so if there
2690 			// are timers to steal do that first.
2691 			//
2692 			// We only check timers on one of the stealing iterations because
2693 			// the time stored in now doesn't change in this loop and checking
2694 			// the timers for each P more than once with the same value of now
2695 			// is probably a waste of time.
2696 			//
2697 			// timerpMask tells us whether the P may have timers at all. If it
2698 			// can't, no need to check at all.
2699 			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
2700 				tnow, w, ran := checkTimers(p2, now)
2701 				now = tnow
2702 				if w != 0 && (pollUntil == 0 || w < pollUntil) {
2703 					pollUntil = w
2704 				}
2705 				if ran {
2706 					// Running the timers may have
2707 					// made an arbitrary number of G's
2708 					// ready and added them to this P's
2709 					// local run queue. That invalidates
2710 					// the assumption of runqsteal
2711 					// that it always has room to add
2712 					// stolen G's. So check now if there
2713 					// is a local G to run.
2714 					if gp, inheritTime := runqget(_p_); gp != nil {
2715 						return gp, inheritTime
2716 					}
2717 					ranTimer = true
2718 				}
2719 			}
2720 
2721 			// Don't bother to attempt to steal if p2 is idle.
2722 			if !idlepMask.read(enum.position()) {
2723 				if gp := runqsteal(_p_, p2, stealTimersOrRunNextG); gp != nil {
2724 					return gp, false
2725 				}
2726 			}
2727 		}
2728 	}
2729 	if ranTimer {
2730 		// Running a timer may have made some goroutine ready.
2731 		goto top
2732 	}
2733 
2734 stop:
2735 
2736 	// We have nothing to do. If we're in the GC mark phase, can
2737 	// safely scan and blacken objects, and have work to do, run
2738 	// idle-time marking rather than give up the P.
2739 	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2740 		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
2741 		if node != nil {
2742 			_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2743 			gp := node.gp.ptr()
2744 			casgstatus(gp, _Gwaiting, _Grunnable)
2745 			if trace.enabled {
2746 				traceGoUnpark(gp, 0)
2747 			}
2748 			return gp, false
2749 		}
2750 	}
2751 
2752 	delta := int64(-1)
2753 	if pollUntil != 0 {
2754 		// checkTimers ensures that pollUntil > now.
2755 delta = pollUntil - now 2756 } 2757 2758 // wasm only: 2759 // If a callback returned and no other goroutine is awake, 2760 // then wake event handler goroutine which pauses execution 2761 // until a callback was triggered. 2762 gp, otherReady := beforeIdle(delta) 2763 if gp != nil { 2764 casgstatus(gp, _Gwaiting, _Grunnable) 2765 if trace.enabled { 2766 traceGoUnpark(gp, 0) 2767 } 2768 return gp, false 2769 } 2770 if otherReady { 2771 goto top 2772 } 2773 2774 // Before we drop our P, make a snapshot of the allp slice, 2775 // which can change underfoot once we no longer block 2776 // safe-points. We don't need to snapshot the contents because 2777 // everything up to cap(allp) is immutable. 2778 allpSnapshot := allp 2779 // Also snapshot masks. Value changes are OK, but we can't allow 2780 // len to change out from under us. 2781 idlepMaskSnapshot := idlepMask 2782 timerpMaskSnapshot := timerpMask 2783 2784 // return P and block 2785 lock(&sched.lock) 2786 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2787 unlock(&sched.lock) 2788 goto top 2789 } 2790 if sched.runqsize != 0 { 2791 gp := globrunqget(_p_, 0) 2792 unlock(&sched.lock) 2793 return gp, false 2794 } 2795 if releasep() != _p_ { 2796 throw("findrunnable: wrong p") 2797 } 2798 pidleput(_p_) 2799 unlock(&sched.lock) 2800 2801 // Delicate dance: thread transitions from spinning to non-spinning state, 2802 // potentially concurrently with submission of new goroutines. We must 2803 // drop nmspinning first and then check all per-P queues again (with 2804 // #StoreLoad memory barrier in between). If we do it the other way around, 2805 // another thread can submit a goroutine after we've checked all run queues 2806 // but before we drop nmspinning; as a result nobody will unpark a thread 2807 // to run the goroutine. 2808 // If we discover new work below, we need to restore m.spinning as a signal 2809 // for resetspinning to unpark a new worker thread (because there can be more 2810 // than one starving goroutine). However, if after discovering new work 2811 // we also observe no idle Ps, it is OK to just park the current thread: 2812 // the system is fully loaded so no spinning threads are required. 2813 // Also see "Worker thread parking/unparking" comment at the top of the file. 2814 wasSpinning := _g_.m.spinning 2815 if _g_.m.spinning { 2816 _g_.m.spinning = false 2817 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2818 throw("findrunnable: negative nmspinning") 2819 } 2820 } 2821 2822 // check all runqueues once again 2823 for id, _p_ := range allpSnapshot { 2824 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) { 2825 lock(&sched.lock) 2826 _p_ = pidleget() 2827 unlock(&sched.lock) 2828 if _p_ != nil { 2829 acquirep(_p_) 2830 if wasSpinning { 2831 _g_.m.spinning = true 2832 atomic.Xadd(&sched.nmspinning, 1) 2833 } 2834 goto top 2835 } 2836 break 2837 } 2838 } 2839 2840 // Similar to above, check for timer creation or expiry concurrently with 2841 // transitioning from spinning to non-spinning. Note that we cannot use 2842 // checkTimers here because it calls adjusttimers which may need to allocate 2843 // memory, and that isn't allowed when we don't have an active P. 
2844 for id, _p_ := range allpSnapshot { 2845 if timerpMaskSnapshot.read(uint32(id)) { 2846 w := nobarrierWakeTime(_p_) 2847 if w != 0 && (pollUntil == 0 || w < pollUntil) { 2848 pollUntil = w 2849 } 2850 } 2851 } 2852 if pollUntil != 0 { 2853 if now == 0 { 2854 now = nanotime() 2855 } 2856 delta = pollUntil - now 2857 if delta < 0 { 2858 delta = 0 2859 } 2860 } 2861 2862 // Check for idle-priority GC work again. 2863 // 2864 // N.B. Since we have no P, gcBlackenEnabled may change at any time; we 2865 // must check again after acquiring a P. 2866 if atomic.Load(&gcBlackenEnabled) != 0 && gcMarkWorkAvailable(nil) { 2867 // Work is available; we can start an idle GC worker only if 2868 // there is an available P and available worker G. 2869 // 2870 // We can attempt to acquire these in either order. Workers are 2871 // almost always available (see comment in findRunnableGCWorker 2872 // for the one case there may be none). Since we're slightly 2873 // less likely to find a P, check for that first. 2874 lock(&sched.lock) 2875 var node *gcBgMarkWorkerNode 2876 _p_ = pidleget() 2877 if _p_ != nil { 2878 // Now that we own a P, gcBlackenEnabled can't change 2879 // (as it requires STW). 2880 if gcBlackenEnabled != 0 { 2881 node = (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 2882 if node == nil { 2883 pidleput(_p_) 2884 _p_ = nil 2885 } 2886 } else { 2887 pidleput(_p_) 2888 _p_ = nil 2889 } 2890 } 2891 unlock(&sched.lock) 2892 if _p_ != nil { 2893 acquirep(_p_) 2894 if wasSpinning { 2895 _g_.m.spinning = true 2896 atomic.Xadd(&sched.nmspinning, 1) 2897 } 2898 2899 // Run the idle worker. 2900 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2901 gp := node.gp.ptr() 2902 casgstatus(gp, _Gwaiting, _Grunnable) 2903 if trace.enabled { 2904 traceGoUnpark(gp, 0) 2905 } 2906 return gp, false 2907 } 2908 } 2909 2910 // poll network 2911 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2912 atomic.Store64(&sched.pollUntil, uint64(pollUntil)) 2913 if _g_.m.p != 0 { 2914 throw("findrunnable: netpoll with p") 2915 } 2916 if _g_.m.spinning { 2917 throw("findrunnable: netpoll with spinning") 2918 } 2919 if faketime != 0 { 2920 // When using fake time, just poll. 2921 delta = 0 2922 } 2923 list := netpoll(delta) // block until new work is available 2924 atomic.Store64(&sched.pollUntil, 0) 2925 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2926 if faketime != 0 && list.empty() { 2927 // Using fake time and nothing is ready; stop M. 2928 // When all M's stop, checkdead will call timejump. 2929 stopm() 2930 goto top 2931 } 2932 lock(&sched.lock) 2933 _p_ = pidleget() 2934 unlock(&sched.lock) 2935 if _p_ == nil { 2936 injectglist(&list) 2937 } else { 2938 acquirep(_p_) 2939 if !list.empty() { 2940 gp := list.pop() 2941 injectglist(&list) 2942 casgstatus(gp, _Gwaiting, _Grunnable) 2943 if trace.enabled { 2944 traceGoUnpark(gp, 0) 2945 } 2946 return gp, false 2947 } 2948 if wasSpinning { 2949 _g_.m.spinning = true 2950 atomic.Xadd(&sched.nmspinning, 1) 2951 } 2952 goto top 2953 } 2954 } else if pollUntil != 0 && netpollinited() { 2955 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 2956 if pollerPollUntil == 0 || pollerPollUntil > pollUntil { 2957 netpollBreak() 2958 } 2959 } 2960 stopm() 2961 goto top 2962 } 2963 2964 // pollWork reports whether there is non-background work this P could 2965 // be doing. This is a fairly lightweight check to be used for 2966 // background work loops, like idle GC. 
It checks a subset of the 2967 // conditions checked by the actual scheduler. 2968 func pollWork() bool { 2969 if sched.runqsize != 0 { 2970 return true 2971 } 2972 p := getg().m.p.ptr() 2973 if !runqempty(p) { 2974 return true 2975 } 2976 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2977 if list := netpoll(0); !list.empty() { 2978 injectglist(&list) 2979 return true 2980 } 2981 } 2982 return false 2983 } 2984 2985 // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't 2986 // going to wake up before the when argument; or it wakes an idle P to service 2987 // timers and the network poller if there isn't one already. 2988 func wakeNetPoller(when int64) { 2989 if atomic.Load64(&sched.lastpoll) == 0 { 2990 // In findrunnable we ensure that when polling the pollUntil 2991 // field is either zero or the time to which the current 2992 // poll is expected to run. This can have a spurious wakeup 2993 // but should never miss a wakeup. 2994 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 2995 if pollerPollUntil == 0 || pollerPollUntil > when { 2996 netpollBreak() 2997 } 2998 } else { 2999 // There are no threads in the network poller, try to get 3000 // one there so it can handle new timers. 3001 if GOOS != "plan9" { // Temporary workaround - see issue #42303. 3002 wakep() 3003 } 3004 } 3005 } 3006 3007 func resetspinning() { 3008 _g_ := getg() 3009 if !_g_.m.spinning { 3010 throw("resetspinning: not a spinning m") 3011 } 3012 _g_.m.spinning = false 3013 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 3014 if int32(nmspinning) < 0 { 3015 throw("findrunnable: negative nmspinning") 3016 } 3017 // M wakeup policy is deliberately somewhat conservative, so check if we 3018 // need to wakeup another P here. See "Worker thread parking/unparking" 3019 // comment at the top of the file for details. 3020 wakep() 3021 } 3022 3023 // injectglist adds each runnable G on the list to some run queue, 3024 // and clears glist. If there is no current P, they are added to the 3025 // global queue, and up to npidle M's are started to run them. 3026 // Otherwise, for each idle P, this adds a G to the global queue 3027 // and starts an M. Any remaining G's are added to the current P's 3028 // local run queue. 3029 // This may temporarily acquire sched.lock. 3030 // Can run concurrently with GC. 3031 func injectglist(glist *gList) { 3032 if glist.empty() { 3033 return 3034 } 3035 if trace.enabled { 3036 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { 3037 traceGoUnpark(gp, 0) 3038 } 3039 } 3040 3041 // Mark all the goroutines as runnable before we put them 3042 // on the run queues. 3043 head := glist.head.ptr() 3044 var tail *g 3045 qsize := 0 3046 for gp := head; gp != nil; gp = gp.schedlink.ptr() { 3047 tail = gp 3048 qsize++ 3049 casgstatus(gp, _Gwaiting, _Grunnable) 3050 } 3051 3052 // Turn the gList into a gQueue. 
3053 var q gQueue 3054 q.head.set(head) 3055 q.tail.set(tail) 3056 *glist = gList{} 3057 3058 startIdle := func(n int) { 3059 for ; n != 0 && sched.npidle != 0; n-- { 3060 startm(nil, false) 3061 } 3062 } 3063 3064 pp := getg().m.p.ptr() 3065 if pp == nil { 3066 lock(&sched.lock) 3067 globrunqputbatch(&q, int32(qsize)) 3068 unlock(&sched.lock) 3069 startIdle(qsize) 3070 return 3071 } 3072 3073 npidle := int(atomic.Load(&sched.npidle)) 3074 var globq gQueue 3075 var n int 3076 for n = 0; n < npidle && !q.empty(); n++ { 3077 g := q.pop() 3078 globq.pushBack(g) 3079 } 3080 if n > 0 { 3081 lock(&sched.lock) 3082 globrunqputbatch(&globq, int32(n)) 3083 unlock(&sched.lock) 3084 startIdle(n) 3085 qsize -= n 3086 } 3087 3088 if !q.empty() { 3089 runqputbatch(pp, &q, qsize) 3090 } 3091 } 3092 3093 // One round of scheduler: find a runnable goroutine and execute it. 3094 // Never returns. 3095 func schedule() { 3096 _g_ := getg() 3097 3098 if _g_.m.locks != 0 { 3099 throw("schedule: holding locks") 3100 } 3101 3102 if _g_.m.lockedg != 0 { 3103 stoplockedm() 3104 execute(_g_.m.lockedg.ptr(), false) // Never returns. 3105 } 3106 3107 // We should not schedule away from a g that is executing a cgo call, 3108 // since the cgo call is using the m's g0 stack. 3109 if _g_.m.incgo { 3110 throw("schedule: in cgo") 3111 } 3112 3113 top: 3114 pp := _g_.m.p.ptr() 3115 pp.preempt = false 3116 3117 if sched.gcwaiting != 0 { 3118 gcstopm() 3119 goto top 3120 } 3121 if pp.runSafePointFn != 0 { 3122 runSafePointFn() 3123 } 3124 3125 // Sanity check: if we are spinning, the run queue should be empty. 3126 // Check this before calling checkTimers, as that might call 3127 // goready to put a ready goroutine on the local run queue. 3128 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { 3129 throw("schedule: spinning with local work") 3130 } 3131 3132 checkTimers(pp, 0) 3133 3134 var gp *g 3135 var inheritTime bool 3136 3137 // Normal goroutines will check for need to wakeP in ready, 3138 // but GCworkers and tracereaders will not, so the check must 3139 // be done here instead. 3140 tryWakeP := false 3141 if trace.enabled || trace.shutdown { 3142 gp = traceReader() 3143 if gp != nil { 3144 casgstatus(gp, _Gwaiting, _Grunnable) 3145 traceGoUnpark(gp, 0) 3146 tryWakeP = true 3147 } 3148 } 3149 if gp == nil && gcBlackenEnabled != 0 { 3150 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 3151 tryWakeP = tryWakeP || gp != nil 3152 } 3153 if gp == nil { 3154 // Check the global runnable queue once in a while to ensure fairness. 3155 // Otherwise two goroutines can completely occupy the local runqueue 3156 // by constantly respawning each other. 3157 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 3158 lock(&sched.lock) 3159 gp = globrunqget(_g_.m.p.ptr(), 1) 3160 unlock(&sched.lock) 3161 } 3162 } 3163 if gp == nil { 3164 gp, inheritTime = runqget(_g_.m.p.ptr()) 3165 // We can see gp != nil here even if the M is spinning, 3166 // if checkTimers added a local goroutine via goready. 3167 } 3168 if gp == nil { 3169 gp, inheritTime = findrunnable() // blocks until work is available 3170 } 3171 3172 // This thread is going to run a goroutine and is not spinning anymore, 3173 // so if it was marked as spinning we need to reset it now and potentially 3174 // start a new spinning M. 3175 if _g_.m.spinning { 3176 resetspinning() 3177 } 3178 3179 if sched.disable.user && !schedEnabled(gp) { 3180 // Scheduling of this goroutine is disabled. 
Put it on 3181 // the list of pending runnable goroutines for when we 3182 // re-enable user scheduling and look again. 3183 lock(&sched.lock) 3184 if schedEnabled(gp) { 3185 // Something re-enabled scheduling while we 3186 // were acquiring the lock. 3187 unlock(&sched.lock) 3188 } else { 3189 sched.disable.runnable.pushBack(gp) 3190 sched.disable.n++ 3191 unlock(&sched.lock) 3192 goto top 3193 } 3194 } 3195 3196 // If about to schedule a not-normal goroutine (a GCworker or tracereader), 3197 // wake a P if there is one. 3198 if tryWakeP { 3199 wakep() 3200 } 3201 if gp.lockedm != 0 { 3202 // Hands off own p to the locked m, 3203 // then blocks waiting for a new p. 3204 startlockedm(gp) 3205 goto top 3206 } 3207 3208 execute(gp, inheritTime) 3209 } 3210 3211 // dropg removes the association between m and the current goroutine m->curg (gp for short). 3212 // Typically a caller sets gp's status away from Grunning and then 3213 // immediately calls dropg to finish the job. The caller is also responsible 3214 // for arranging that gp will be restarted using ready at an 3215 // appropriate time. After calling dropg and arranging for gp to be 3216 // readied later, the caller can do other work but eventually should 3217 // call schedule to restart the scheduling of goroutines on this m. 3218 func dropg() { 3219 _g_ := getg() 3220 3221 setMNoWB(&_g_.m.curg.m, nil) 3222 setGNoWB(&_g_.m.curg, nil) 3223 } 3224 3225 // checkTimers runs any timers for the P that are ready. 3226 // If now is not 0 it is the current time. 3227 // It returns the current time or 0 if it is not known, 3228 // and the time when the next timer should run or 0 if there is no next timer, 3229 // and reports whether it ran any timers. 3230 // If the time when the next timer should run is not 0, 3231 // it is always larger than the returned time. 3232 // We pass now in and out to avoid extra calls of nanotime. 3233 //go:yeswritebarrierrec 3234 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { 3235 // If it's not yet time for the first timer, or the first adjusted 3236 // timer, then there is nothing to do. 3237 next := int64(atomic.Load64(&pp.timer0When)) 3238 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest)) 3239 if next == 0 || (nextAdj != 0 && nextAdj < next) { 3240 next = nextAdj 3241 } 3242 3243 if next == 0 { 3244 // No timers to run or adjust. 3245 return now, 0, false 3246 } 3247 3248 if now == 0 { 3249 now = nanotime() 3250 } 3251 if now < next { 3252 // Next timer is not ready to run, but keep going 3253 // if we would clear deleted timers. 3254 // This corresponds to the condition below where 3255 // we decide whether to call clearDeletedTimers. 3256 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) { 3257 return now, next, false 3258 } 3259 } 3260 3261 lock(&pp.timersLock) 3262 3263 if len(pp.timers) > 0 { 3264 adjusttimers(pp, now) 3265 for len(pp.timers) > 0 { 3266 // Note that runtimer may temporarily unlock 3267 // pp.timersLock. 3268 if tw := runtimer(pp, now); tw != 0 { 3269 if tw > 0 { 3270 pollUntil = tw 3271 } 3272 break 3273 } 3274 ran = true 3275 } 3276 } 3277 3278 // If this is the local P, and there are a lot of deleted timers, 3279 // clear them out. We only do this for the local P to reduce 3280 // lock contention on timersLock. 
3281 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 { 3282 clearDeletedTimers(pp) 3283 } 3284 3285 unlock(&pp.timersLock) 3286 3287 return now, pollUntil, ran 3288 } 3289 3290 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 3291 unlock((*mutex)(lock)) 3292 return true 3293 } 3294 3295 // park continuation on g0. 3296 func park_m(gp *g) { 3297 _g_ := getg() 3298 3299 if trace.enabled { 3300 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 3301 } 3302 3303 casgstatus(gp, _Grunning, _Gwaiting) 3304 dropg() 3305 3306 if fn := _g_.m.waitunlockf; fn != nil { 3307 ok := fn(gp, _g_.m.waitlock) 3308 _g_.m.waitunlockf = nil 3309 _g_.m.waitlock = nil 3310 if !ok { 3311 if trace.enabled { 3312 traceGoUnpark(gp, 2) 3313 } 3314 casgstatus(gp, _Gwaiting, _Grunnable) 3315 execute(gp, true) // Schedule it back, never returns. 3316 } 3317 } 3318 schedule() 3319 } 3320 3321 func goschedImpl(gp *g) { 3322 status := readgstatus(gp) 3323 if status&^_Gscan != _Grunning { 3324 dumpgstatus(gp) 3325 throw("bad g status") 3326 } 3327 casgstatus(gp, _Grunning, _Grunnable) 3328 dropg() 3329 lock(&sched.lock) 3330 globrunqput(gp) 3331 unlock(&sched.lock) 3332 3333 schedule() 3334 } 3335 3336 // Gosched continuation on g0. 3337 func gosched_m(gp *g) { 3338 if trace.enabled { 3339 traceGoSched() 3340 } 3341 goschedImpl(gp) 3342 } 3343 3344 // goschedguarded is a forbidden-states-avoided version of gosched_m 3345 func goschedguarded_m(gp *g) { 3346 3347 if !canPreemptM(gp.m) { 3348 gogo(&gp.sched) // never return 3349 } 3350 3351 if trace.enabled { 3352 traceGoSched() 3353 } 3354 goschedImpl(gp) 3355 } 3356 3357 func gopreempt_m(gp *g) { 3358 if trace.enabled { 3359 traceGoPreempt() 3360 } 3361 goschedImpl(gp) 3362 } 3363 3364 // preemptPark parks gp and puts it in _Gpreempted. 3365 // 3366 //go:systemstack 3367 func preemptPark(gp *g) { 3368 if trace.enabled { 3369 traceGoPark(traceEvGoBlock, 0) 3370 } 3371 status := readgstatus(gp) 3372 if status&^_Gscan != _Grunning { 3373 dumpgstatus(gp) 3374 throw("bad g status") 3375 } 3376 gp.waitreason = waitReasonPreempted 3377 // Transition from _Grunning to _Gscan|_Gpreempted. We can't 3378 // be in _Grunning when we dropg because then we'd be running 3379 // without an M, but the moment we're in _Gpreempted, 3380 // something could claim this G before we've fully cleaned it 3381 // up. Hence, we set the scan bit to lock down further 3382 // transitions until we can dropg. 3383 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted) 3384 dropg() 3385 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted) 3386 schedule() 3387 } 3388 3389 // goyield is like Gosched, but it: 3390 // - emits a GoPreempt trace event instead of a GoSched trace event 3391 // - puts the current G on the runq of the current P instead of the globrunq 3392 func goyield() { 3393 checkTimeouts() 3394 mcall(goyield_m) 3395 } 3396 3397 func goyield_m(gp *g) { 3398 if trace.enabled { 3399 traceGoPreempt() 3400 } 3401 pp := gp.m.p.ptr() 3402 casgstatus(gp, _Grunning, _Grunnable) 3403 dropg() 3404 runqput(pp, gp, false) 3405 schedule() 3406 } 3407 3408 // Finishes execution of the current goroutine. 3409 func goexit1() { 3410 if raceenabled { 3411 racegoend() 3412 } 3413 if trace.enabled { 3414 traceGoEnd() 3415 } 3416 3417 ///MYCODE 3418 if GlobalEnableOracle { 3419 CurrentGoInfo().RemoveAllRef() 3420 } 3421 3422 mcall(goexit0) 3423 } 3424 3425 // goexit continuation on g0. 
3426 func goexit0(gp *g) {
3427 	_g_ := getg()
3428 
3429 	casgstatus(gp, _Grunning, _Gdead)
3430 	if isSystemGoroutine(gp, false) {
3431 		atomic.Xadd(&sched.ngsys, -1)
3432 	}
3433 	gp.m = nil
3434 	locked := gp.lockedm != 0
3435 	gp.lockedm = 0
3436 	_g_.m.lockedg = 0
3437 	gp.preemptStop = false
3438 	gp.paniconfault = false
3439 	gp._defer = nil // should be true already but just in case.
3440 	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
3441 	gp.writebuf = nil
3442 	gp.waitreason = 0
3443 	gp.param = nil
3444 	gp.labels = nil
3445 	gp.timer = nil
3446 
3447 	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
3448 		// Flush assist credit to the global pool. This gives
3449 		// better information to pacing if the application is
3450 		// rapidly creating and exiting goroutines.
3451 		assistWorkPerByte := float64frombits(atomic.Load64(&gcController.assistWorkPerByte))
3452 		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
3453 		atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
3454 		gp.gcAssistBytes = 0
3455 	}
3456 
3457 	dropg()
3458 
3459 	if GOARCH == "wasm" { // no threads yet on wasm
3460 		gfput(_g_.m.p.ptr(), gp)
3461 		schedule() // never returns
3462 	}
3463 
3464 	if _g_.m.lockedInt != 0 {
3465 		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
3466 		throw("internal lockOSThread error")
3467 	}
3468 	gfput(_g_.m.p.ptr(), gp)
3469 	if locked {
3470 		// The goroutine may have locked this thread because
3471 		// it put it in an unusual kernel state. Kill it
3472 		// rather than returning it to the thread pool.
3473 
3474 		// Return to mstart, which will release the P and exit
3475 		// the thread.
3476 		if GOOS != "plan9" { // See golang.org/issue/22227.
3477 			gogo(&_g_.m.g0.sched)
3478 		} else {
3479 			// Clear lockedExt on plan9 since we may end up re-using
3480 			// this thread.
3481 			_g_.m.lockedExt = 0
3482 		}
3483 	}
3484 	schedule()
3485 }
3486 
3487 // save updates getg().sched to refer to pc and sp so that a following
3488 // gogo will restore pc and sp.
3489 //
3490 // save must not have write barriers because invoking a write barrier
3491 // can clobber getg().sched.
3492 //
3493 //go:nosplit
3494 //go:nowritebarrierrec
3495 func save(pc, sp uintptr) {
3496 	_g_ := getg()
3497 
3498 	_g_.sched.pc = pc
3499 	_g_.sched.sp = sp
3500 	_g_.sched.lr = 0
3501 	_g_.sched.ret = 0
3502 	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
3503 	// We need to ensure ctxt is zero, but can't have a write
3504 	// barrier here. However, it should always already be zero.
3505 	// Assert that.
3506 	if _g_.sched.ctxt != nil {
3507 		badctxt()
3508 	}
3509 }
3510 
3511 // The goroutine g is about to enter a system call.
3512 // Record that it's not using the cpu anymore.
3513 // This is called only from the go syscall library and cgocall,
3514 // not from the low-level system calls used by the runtime.
3515 //
3516 // Entersyscall cannot split the stack: the gosave must
3517 // make g->sched refer to the caller's stack segment, because
3518 // entersyscall is going to return immediately after.
3519 //
3520 // Nothing entersyscall calls can split the stack either.
3521 // We cannot safely move the stack during an active call to syscall,
3522 // because we do not know which of the uintptr arguments are
3523 // really pointers (back into the stack).
3524 // In practice, this means that we make the fast path run through
3525 // entersyscall doing no-split things, and the slow path has to use systemstack
3526 // to run bigger things on the system stack.
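//
// For example, in a wrapper call such as
//
//	syscall.Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
//
// the pointer argument may refer into the caller's stack frame, so moving
// or splitting the stack mid-call would leave the kernel writing through a
// stale address. (Illustrative shape only; the real wrappers live in the
// syscall package.)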
3527 // 3528 // reentersyscall is the entry point used by cgo callbacks, where explicitly 3529 // saved SP and PC are restored. This is needed when exitsyscall will be called 3530 // from a function further up in the call stack than the parent, as g->syscallsp 3531 // must always point to a valid stack frame. entersyscall below is the normal 3532 // entry point for syscalls, which obtains the SP and PC from the caller. 3533 // 3534 // Syscall tracing: 3535 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 3536 // If the syscall does not block, that is it, we do not emit any other events. 3537 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 3538 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 3539 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 3540 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 3541 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 3542 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 3543 // and we wait for the increment before emitting traceGoSysExit. 3544 // Note that the increment is done even if tracing is not enabled, 3545 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 3546 // 3547 //go:nosplit 3548 func reentersyscall(pc, sp uintptr) { 3549 _g_ := getg() 3550 3551 // Disable preemption because during this function g is in Gsyscall status, 3552 // but can have inconsistent g->sched, do not let GC observe it. 3553 _g_.m.locks++ 3554 3555 // Entersyscall must not call any function that might split/grow the stack. 3556 // (See details in comment above.) 3557 // Catch calls that might, by replacing the stack guard with something that 3558 // will trip any stack check and leaving a flag to tell newstack to die. 3559 _g_.stackguard0 = stackPreempt 3560 _g_.throwsplit = true 3561 3562 // Leave SP around for GC and traceback. 3563 save(pc, sp) 3564 _g_.syscallsp = sp 3565 _g_.syscallpc = pc 3566 casgstatus(_g_, _Grunning, _Gsyscall) 3567 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3568 systemstack(func() { 3569 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3570 throw("entersyscall") 3571 }) 3572 } 3573 3574 if trace.enabled { 3575 systemstack(traceGoSysCall) 3576 // systemstack itself clobbers g.sched.{pc,sp} and we might 3577 // need them later when the G is genuinely blocked in a 3578 // syscall 3579 save(pc, sp) 3580 } 3581 3582 if atomic.Load(&sched.sysmonwait) != 0 { 3583 systemstack(entersyscall_sysmon) 3584 save(pc, sp) 3585 } 3586 3587 if _g_.m.p.ptr().runSafePointFn != 0 { 3588 // runSafePointFn may stack split if run on this stack 3589 systemstack(runSafePointFn) 3590 save(pc, sp) 3591 } 3592 3593 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3594 _g_.sysblocktraced = true 3595 pp := _g_.m.p.ptr() 3596 pp.m = 0 3597 _g_.m.oldp.set(pp) 3598 _g_.m.p = 0 3599 atomic.Store(&pp.status, _Psyscall) 3600 if sched.gcwaiting != 0 { 3601 systemstack(entersyscall_gcwait) 3602 save(pc, sp) 3603 } 3604 3605 _g_.m.locks-- 3606 } 3607 3608 // Standard syscall entry used by the go syscall library and normal cgo calls. 3609 // 3610 // This is exported via linkname to assembly in the syscall package. 
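//
// Schematically, a blocking wrapper in the syscall package does the
// equivalent of the following (the real wrappers are assembly; this
// Go-like sketch, including the rawSyscall helper name, is illustrative
// only):
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) {
//		entersyscall()
//		r1, r2, errno := rawSyscall(trap, a1, a2, a3) // the raw kernel trap
//		exitsyscall()
//		return r1, r2, Errno(errno)
//	}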
3611 // 3612 //go:nosplit 3613 //go:linkname entersyscall 3614 func entersyscall() { 3615 reentersyscall(getcallerpc(), getcallersp()) 3616 } 3617 3618 func entersyscall_sysmon() { 3619 lock(&sched.lock) 3620 if atomic.Load(&sched.sysmonwait) != 0 { 3621 atomic.Store(&sched.sysmonwait, 0) 3622 notewakeup(&sched.sysmonnote) 3623 } 3624 unlock(&sched.lock) 3625 } 3626 3627 func entersyscall_gcwait() { 3628 _g_ := getg() 3629 _p_ := _g_.m.oldp.ptr() 3630 3631 lock(&sched.lock) 3632 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 3633 if trace.enabled { 3634 traceGoSysBlock(_p_) 3635 traceProcStop(_p_) 3636 } 3637 _p_.syscalltick++ 3638 if sched.stopwait--; sched.stopwait == 0 { 3639 notewakeup(&sched.stopnote) 3640 } 3641 } 3642 unlock(&sched.lock) 3643 } 3644 3645 // The same as entersyscall(), but with a hint that the syscall is blocking. 3646 //go:nosplit 3647 func entersyscallblock() { 3648 _g_ := getg() 3649 3650 _g_.m.locks++ // see comment in entersyscall 3651 _g_.throwsplit = true 3652 _g_.stackguard0 = stackPreempt // see comment in entersyscall 3653 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3654 _g_.sysblocktraced = true 3655 _g_.m.p.ptr().syscalltick++ 3656 3657 // Leave SP around for GC and traceback. 3658 pc := getcallerpc() 3659 sp := getcallersp() 3660 save(pc, sp) 3661 _g_.syscallsp = _g_.sched.sp 3662 _g_.syscallpc = _g_.sched.pc 3663 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3664 sp1 := sp 3665 sp2 := _g_.sched.sp 3666 sp3 := _g_.syscallsp 3667 systemstack(func() { 3668 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3669 throw("entersyscallblock") 3670 }) 3671 } 3672 casgstatus(_g_, _Grunning, _Gsyscall) 3673 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3674 systemstack(func() { 3675 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3676 throw("entersyscallblock") 3677 }) 3678 } 3679 3680 systemstack(entersyscallblock_handoff) 3681 3682 // Resave for traceback during blocked call. 3683 save(getcallerpc(), getcallersp()) 3684 3685 _g_.m.locks-- 3686 } 3687 3688 func entersyscallblock_handoff() { 3689 if trace.enabled { 3690 traceGoSysCall() 3691 traceGoSysBlock(getg().m.p.ptr()) 3692 } 3693 handoffp(releasep()) 3694 } 3695 3696 // The goroutine g exited its system call. 3697 // Arrange for it to run on a cpu again. 3698 // This is called only from the go syscall library, not 3699 // from the low-level system calls used by the runtime. 3700 // 3701 // Write barriers are not allowed because our P may have been stolen. 3702 // 3703 // This is exported via linkname to assembly in the syscall package. 3704 // 3705 //go:nosplit 3706 //go:nowritebarrierrec 3707 //go:linkname exitsyscall 3708 func exitsyscall() { 3709 _g_ := getg() 3710 3711 _g_.m.locks++ // see comment in entersyscall 3712 if getcallersp() > _g_.syscallsp { 3713 throw("exitsyscall: syscall frame is no longer valid") 3714 } 3715 3716 _g_.waitsince = 0 3717 oldp := _g_.m.oldp.ptr() 3718 _g_.m.oldp = 0 3719 if exitsyscallfast(oldp) { 3720 if trace.enabled { 3721 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3722 systemstack(traceGoStart) 3723 } 3724 } 3725 // There's a cpu for us, so we can run. 3726 _g_.m.p.ptr().syscalltick++ 3727 // We need to cas the status and scan before resuming... 
3728 casgstatus(_g_, _Gsyscall, _Grunning) 3729 3730 // Garbage collector isn't running (since we are), 3731 // so okay to clear syscallsp. 3732 _g_.syscallsp = 0 3733 _g_.m.locks-- 3734 if _g_.preempt { 3735 // restore the preemption request in case we've cleared it in newstack 3736 _g_.stackguard0 = stackPreempt 3737 } else { 3738 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 3739 _g_.stackguard0 = _g_.stack.lo + _StackGuard 3740 } 3741 _g_.throwsplit = false 3742 3743 if sched.disable.user && !schedEnabled(_g_) { 3744 // Scheduling of this goroutine is disabled. 3745 Gosched() 3746 } 3747 3748 return 3749 } 3750 3751 _g_.sysexitticks = 0 3752 if trace.enabled { 3753 // Wait till traceGoSysBlock event is emitted. 3754 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3755 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 3756 osyield() 3757 } 3758 // We can't trace syscall exit right now because we don't have a P. 3759 // Tracing code can invoke write barriers that cannot run without a P. 3760 // So instead we remember the syscall exit time and emit the event 3761 // in execute when we have a P. 3762 _g_.sysexitticks = cputicks() 3763 } 3764 3765 _g_.m.locks-- 3766 3767 // Call the scheduler. 3768 mcall(exitsyscall0) 3769 3770 // Scheduler returned, so we're allowed to run now. 3771 // Delete the syscallsp information that we left for 3772 // the garbage collector during the system call. 3773 // Must wait until now because until gosched returns 3774 // we don't know for sure that the garbage collector 3775 // is not running. 3776 _g_.syscallsp = 0 3777 _g_.m.p.ptr().syscalltick++ 3778 _g_.throwsplit = false 3779 } 3780 3781 //go:nosplit 3782 func exitsyscallfast(oldp *p) bool { 3783 _g_ := getg() 3784 3785 // Freezetheworld sets stopwait but does not retake P's. 3786 if sched.stopwait == freezeStopWait { 3787 return false 3788 } 3789 3790 // Try to re-acquire the last P. 3791 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { 3792 // There's a cpu for us, so we can run. 3793 wirep(oldp) 3794 exitsyscallfast_reacquired() 3795 return true 3796 } 3797 3798 // Try to get any other idle P. 3799 if sched.pidle != 0 { 3800 var ok bool 3801 systemstack(func() { 3802 ok = exitsyscallfast_pidle() 3803 if ok && trace.enabled { 3804 if oldp != nil { 3805 // Wait till traceGoSysBlock event is emitted. 3806 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3807 for oldp.syscalltick == _g_.m.syscalltick { 3808 osyield() 3809 } 3810 } 3811 traceGoSysExit(0) 3812 } 3813 }) 3814 if ok { 3815 return true 3816 } 3817 } 3818 return false 3819 } 3820 3821 // exitsyscallfast_reacquired is the exitsyscall path on which this G 3822 // has successfully reacquired the P it was running on before the 3823 // syscall. 3824 // 3825 //go:nosplit 3826 func exitsyscallfast_reacquired() { 3827 _g_ := getg() 3828 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3829 if trace.enabled { 3830 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 3831 // traceGoSysBlock for this syscall was already emitted, 3832 // but here we effectively retake the p from the new syscall running on the same p. 3833 systemstack(func() { 3834 // Denote blocking of the new syscall. 3835 traceGoSysBlock(_g_.m.p.ptr()) 3836 // Denote completion of the current syscall. 
3837 traceGoSysExit(0) 3838 }) 3839 } 3840 _g_.m.p.ptr().syscalltick++ 3841 } 3842 } 3843 3844 func exitsyscallfast_pidle() bool { 3845 lock(&sched.lock) 3846 _p_ := pidleget() 3847 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 3848 atomic.Store(&sched.sysmonwait, 0) 3849 notewakeup(&sched.sysmonnote) 3850 } 3851 unlock(&sched.lock) 3852 if _p_ != nil { 3853 acquirep(_p_) 3854 return true 3855 } 3856 return false 3857 } 3858 3859 // exitsyscall slow path on g0. 3860 // Failed to acquire P, enqueue gp as runnable. 3861 // 3862 //go:nowritebarrierrec 3863 func exitsyscall0(gp *g) { 3864 _g_ := getg() 3865 3866 casgstatus(gp, _Gsyscall, _Grunnable) 3867 dropg() 3868 lock(&sched.lock) 3869 var _p_ *p 3870 if schedEnabled(_g_) { 3871 _p_ = pidleget() 3872 } 3873 if _p_ == nil { 3874 globrunqput(gp) 3875 } else if atomic.Load(&sched.sysmonwait) != 0 { 3876 atomic.Store(&sched.sysmonwait, 0) 3877 notewakeup(&sched.sysmonnote) 3878 } 3879 unlock(&sched.lock) 3880 if _p_ != nil { 3881 acquirep(_p_) 3882 execute(gp, false) // Never returns. 3883 } 3884 if _g_.m.lockedg != 0 { 3885 // Wait until another thread schedules gp and so m again. 3886 stoplockedm() 3887 execute(gp, false) // Never returns. 3888 } 3889 stopm() 3890 schedule() // Never returns. 3891 } 3892 3893 func beforefork() { 3894 gp := getg().m.curg 3895 3896 // Block signals during a fork, so that the child does not run 3897 // a signal handler before exec if a signal is sent to the process 3898 // group. See issue #18600. 3899 gp.m.locks++ 3900 sigsave(&gp.m.sigmask) 3901 sigblock(false) 3902 3903 // This function is called before fork in syscall package. 3904 // Code between fork and exec must not allocate memory nor even try to grow stack. 3905 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 3906 // runtime_AfterFork will undo this in parent process, but not in child. 3907 gp.stackguard0 = stackFork 3908 } 3909 3910 // Called from syscall package before fork. 3911 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 3912 //go:nosplit 3913 func syscall_runtime_BeforeFork() { 3914 systemstack(beforefork) 3915 } 3916 3917 func afterfork() { 3918 gp := getg().m.curg 3919 3920 // See the comments in beforefork. 3921 gp.stackguard0 = gp.stack.lo + _StackGuard 3922 3923 msigrestore(gp.m.sigmask) 3924 3925 gp.m.locks-- 3926 } 3927 3928 // Called from syscall package after fork in parent. 3929 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 3930 //go:nosplit 3931 func syscall_runtime_AfterFork() { 3932 systemstack(afterfork) 3933 } 3934 3935 // inForkedChild is true while manipulating signals in the child process. 3936 // This is used to avoid calling libc functions in case we are using vfork. 3937 var inForkedChild bool 3938 3939 // Called from syscall package after fork in child. 3940 // It resets non-sigignored signals to the default handler, and 3941 // restores the signal mask in preparation for the exec. 3942 // 3943 // Because this might be called during a vfork, and therefore may be 3944 // temporarily sharing address space with the parent process, this must 3945 // not change any global variables or calling into C code that may do so. 3946 // 3947 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 3948 //go:nosplit 3949 //go:nowritebarrierrec 3950 func syscall_runtime_AfterForkInChild() { 3951 // It's OK to change the global variable inForkedChild here 3952 // because we are going to change it back. 
There is no race here, 3953 // because if we are sharing address space with the parent process, 3954 // then the parent process can not be running concurrently. 3955 inForkedChild = true 3956 3957 clearSignalHandlers() 3958 3959 // When we are the child we are the only thread running, 3960 // so we know that nothing else has changed gp.m.sigmask. 3961 msigrestore(getg().m.sigmask) 3962 3963 inForkedChild = false 3964 } 3965 3966 // pendingPreemptSignals is the number of preemption signals 3967 // that have been sent but not received. This is only used on Darwin. 3968 // For #41702. 3969 var pendingPreemptSignals uint32 3970 3971 // Called from syscall package before Exec. 3972 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 3973 func syscall_runtime_BeforeExec() { 3974 // Prevent thread creation during exec. 3975 execLock.lock() 3976 3977 // On Darwin, wait for all pending preemption signals to 3978 // be received. See issue #41702. 3979 if GOOS == "darwin" || GOOS == "ios" { 3980 for int32(atomic.Load(&pendingPreemptSignals)) > 0 { 3981 osyield() 3982 } 3983 } 3984 } 3985 3986 // Called from syscall package after Exec. 3987 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 3988 func syscall_runtime_AfterExec() { 3989 execLock.unlock() 3990 } 3991 3992 // Allocate a new g, with a stack big enough for stacksize bytes. 3993 func malg(stacksize int32) *g { 3994 newg := new(g) 3995 if stacksize >= 0 { 3996 stacksize = round2(_StackSystem + stacksize) 3997 systemstack(func() { 3998 newg.stack = stackalloc(uint32(stacksize)) 3999 }) 4000 newg.stackguard0 = newg.stack.lo + _StackGuard 4001 newg.stackguard1 = ^uintptr(0) 4002 // Clear the bottom word of the stack. We record g 4003 // there on gsignal stack during VDSO on ARM and ARM64. 4004 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0 4005 } 4006 return newg 4007 } 4008 4009 // Create a new g running fn with siz bytes of arguments. 4010 // Put it on the queue of g's waiting to run. 4011 // The compiler turns a go statement into a call to this. 4012 // 4013 // The stack layout of this call is unusual: it assumes that the 4014 // arguments to pass to fn are on the stack sequentially immediately 4015 // after &fn. Hence, they are logically part of newproc's argument 4016 // frame, even though they don't appear in its signature (and can't 4017 // because their types differ between call sites). 4018 // 4019 // This must be nosplit because this stack layout means there are 4020 // untyped arguments in newproc's argument frame. Stack copies won't 4021 // be able to adjust them and stack splits won't be able to copy them. 4022 // 4023 //go:nosplit 4024 func newproc(siz int32, fn *funcval) { 4025 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 4026 gp := getg() 4027 pc := getcallerpc() 4028 systemstack(func() { 4029 newg := newproc1(fn, argp, siz, gp, pc) 4030 4031 4032 _p_ := getg().m.p.ptr() 4033 runqput(_p_, newg, true) 4034 4035 if mainStarted { 4036 wakep() 4037 } 4038 }) 4039 } 4040 4041 // Create a new g in state _Grunnable, starting at fn, with narg bytes 4042 // of arguments starting at argp. callerpc is the address of the go 4043 // statement that created this. The caller is responsible for adding 4044 // the new g to the scheduler. 4045 // 4046 // This must run on the system stack because it's the continuation of 4047 // newproc, which cannot split the stack. 
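newproc above is the runtime half of a go statement: the compiler evaluates the call's arguments at the go statement and passes them, together with the funcval, to newproc, and newproc1 (next) copies them into the new goroutine's stack frame. A small hypothetical user-level illustration of the copy semantics this produces:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	x := 1
	wg.Add(1)
	// The argument is evaluated here and copied into the new goroutine's
	// frame (the memmove of argp in newproc1), so the assignment to x
	// below is never observed by the goroutine.
	go func(v int) {
		defer wg.Done()
		fmt.Println("goroutine saw", v) // always prints 1
	}(x)
	x = 2
	wg.Wait()
}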
4048 // 4049 //go:systemstack 4050 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) *g { 4051 _g_ := getg() 4052 4053 if fn == nil { 4054 _g_.m.throwing = -1 // do not dump full stacks 4055 throw("go of nil func value") 4056 } 4057 acquirem() // disable preemption because it can be holding p in a local var 4058 siz := narg 4059 siz = (siz + 7) &^ 7 4060 4061 // We could allocate a larger initial stack if necessary. 4062 // Not worth it: this is almost always an error. 4063 // 4*sizeof(uintreg): extra space added below 4064 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 4065 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 4066 throw("newproc: function arguments too large for new goroutine") 4067 } 4068 4069 _p_ := _g_.m.p.ptr() 4070 newg := gfget(_p_) 4071 if newg == nil { 4072 newg = malg(_StackMin) 4073 casgstatus(newg, _Gidle, _Gdead) 4074 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 4075 } 4076 if newg.stack.hi == 0 { 4077 throw("newproc1: newg missing stack") 4078 } 4079 4080 if readgstatus(newg) != _Gdead { 4081 throw("newproc1: new g is not Gdead") 4082 } 4083 4084 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 4085 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 4086 sp := newg.stack.hi - totalSize 4087 spArg := sp 4088 if usesLR { 4089 // caller's LR 4090 *(*uintptr)(unsafe.Pointer(sp)) = 0 4091 prepGoExitFrame(sp) 4092 spArg += sys.MinFrameSize 4093 } 4094 if narg > 0 { 4095 memmove(unsafe.Pointer(spArg), argp, uintptr(narg)) 4096 // This is a stack-to-stack copy. If write barriers 4097 // are enabled and the source stack is grey (the 4098 // destination is always black), then perform a 4099 // barrier copy. We do this *after* the memmove 4100 // because the destination stack may have garbage on 4101 // it. 4102 if writeBarrier.needed && !_g_.m.curg.gcscandone { 4103 f := findfunc(fn.fn) 4104 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 4105 if stkmap.nbit > 0 { 4106 // We're in the prologue, so it's always stack map index 0. 4107 bv := stackmapdata(stkmap, 0) 4108 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata) 4109 } 4110 } 4111 } 4112 4113 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 4114 newg.sched.sp = sp 4115 newg.stktopsp = sp 4116 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 4117 newg.sched.g = guintptr(unsafe.Pointer(newg)) 4118 gostartcallfn(&newg.sched, fn) 4119 newg.gopc = callerpc 4120 newg.ancestors = saveAncestors(callergp) 4121 newg.startpc = fn.fn 4122 if _g_.m.curg != nil { 4123 newg.labels = _g_.m.curg.labels 4124 } 4125 if isSystemGoroutine(newg, false) { 4126 atomic.Xadd(&sched.ngsys, +1) 4127 } 4128 casgstatus(newg, _Gdead, _Grunnable) 4129 4130 if _p_.goidcache == _p_.goidcacheend { 4131 // Sched.goidgen is the last allocated id, 4132 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 4133 // At startup sched.goidgen=0, so main goroutine receives goid=1. 
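// Editor's note, not part of the upstream file: with the usual batch constant
// (_GoidCacheBatch = 16), the Xadd64 below returns the new end of the batch.
// The first P to refill therefore sees goidgen go 0 -> 16, sets
// goidcache = 16 - (16-1) = 1 and goidcacheend = 17, and hands out goids 1..16
// before refilling, which is how the main goroutine ends up with goid 1.
// The ///MYCODE line a few statements below is this fork's instrumentation; it
// appears to attach a per-goroutine tracking record (NewGoInfo) to every newly
// created g at creation time.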
4134 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 4135 _p_.goidcache -= _GoidCacheBatch - 1 4136 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 4137 } 4138 newg.goid = int64(_p_.goidcache) 4139 4140 ///MYCODE 4141 newg.goInfo = NewGoInfo(newg) 4142 4143 _p_.goidcache++ 4144 if raceenabled { 4145 newg.racectx = racegostart(callerpc) 4146 } 4147 if trace.enabled { 4148 traceGoCreate(newg, newg.startpc) 4149 } 4150 releasem(_g_.m) 4151 4152 return newg 4153 } 4154 4155 // saveAncestors copies previous ancestors of the given caller g and 4156 // includes infor for the current caller into a new set of tracebacks for 4157 // a g being created. 4158 func saveAncestors(callergp *g) *[]ancestorInfo { 4159 // Copy all prior info, except for the root goroutine (goid 0). 4160 if debug.tracebackancestors <= 0 || callergp.goid == 0 { 4161 return nil 4162 } 4163 var callerAncestors []ancestorInfo 4164 if callergp.ancestors != nil { 4165 callerAncestors = *callergp.ancestors 4166 } 4167 n := int32(len(callerAncestors)) + 1 4168 if n > debug.tracebackancestors { 4169 n = debug.tracebackancestors 4170 } 4171 ancestors := make([]ancestorInfo, n) 4172 copy(ancestors[1:], callerAncestors) 4173 4174 var pcs [_TracebackMaxFrames]uintptr 4175 npcs := gcallers(callergp, 0, pcs[:]) 4176 ipcs := make([]uintptr, npcs) 4177 copy(ipcs, pcs[:]) 4178 ancestors[0] = ancestorInfo{ 4179 pcs: ipcs, 4180 goid: callergp.goid, 4181 gopc: callergp.gopc, 4182 } 4183 4184 ancestorsp := new([]ancestorInfo) 4185 *ancestorsp = ancestors 4186 return ancestorsp 4187 } 4188 4189 // Put on gfree list. 4190 // If local list is too long, transfer a batch to the global list. 4191 func gfput(_p_ *p, gp *g) { 4192 if readgstatus(gp) != _Gdead { 4193 throw("gfput: bad status (not Gdead)") 4194 } 4195 4196 stksize := gp.stack.hi - gp.stack.lo 4197 4198 if stksize != _FixedStack { 4199 // non-standard stack size - free it. 4200 stackfree(gp.stack) 4201 gp.stack.lo = 0 4202 gp.stack.hi = 0 4203 gp.stackguard0 = 0 4204 } 4205 4206 _p_.gFree.push(gp) 4207 _p_.gFree.n++ 4208 if _p_.gFree.n >= 64 { 4209 lock(&sched.gFree.lock) 4210 for _p_.gFree.n >= 32 { 4211 _p_.gFree.n-- 4212 gp = _p_.gFree.pop() 4213 if gp.stack.lo == 0 { 4214 sched.gFree.noStack.push(gp) 4215 } else { 4216 sched.gFree.stack.push(gp) 4217 } 4218 sched.gFree.n++ 4219 } 4220 unlock(&sched.gFree.lock) 4221 } 4222 } 4223 4224 // Get from gfree list. 4225 // If local list is empty, grab a batch from global list. 4226 func gfget(_p_ *p) *g { 4227 retry: 4228 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { 4229 lock(&sched.gFree.lock) 4230 // Move a batch of free Gs to the P. 4231 for _p_.gFree.n < 32 { 4232 // Prefer Gs with stacks. 4233 gp := sched.gFree.stack.pop() 4234 if gp == nil { 4235 gp = sched.gFree.noStack.pop() 4236 if gp == nil { 4237 break 4238 } 4239 } 4240 sched.gFree.n-- 4241 _p_.gFree.push(gp) 4242 _p_.gFree.n++ 4243 } 4244 unlock(&sched.gFree.lock) 4245 goto retry 4246 } 4247 gp := _p_.gFree.pop() 4248 if gp == nil { 4249 return nil 4250 } 4251 _p_.gFree.n-- 4252 if gp.stack.lo == 0 { 4253 // Stack was deallocated in gfput. Allocate a new one. 
4254 systemstack(func() { 4255 gp.stack = stackalloc(_FixedStack) 4256 }) 4257 gp.stackguard0 = gp.stack.lo + _StackGuard 4258 } else { 4259 if raceenabled { 4260 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 4261 } 4262 if msanenabled { 4263 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 4264 } 4265 } 4266 return gp 4267 } 4268 4269 // Purge all cached G's from gfree list to the global list. 4270 func gfpurge(_p_ *p) { 4271 lock(&sched.gFree.lock) 4272 for !_p_.gFree.empty() { 4273 gp := _p_.gFree.pop() 4274 _p_.gFree.n-- 4275 if gp.stack.lo == 0 { 4276 sched.gFree.noStack.push(gp) 4277 } else { 4278 sched.gFree.stack.push(gp) 4279 } 4280 sched.gFree.n++ 4281 } 4282 unlock(&sched.gFree.lock) 4283 } 4284 4285 // Breakpoint executes a breakpoint trap. 4286 func Breakpoint() { 4287 breakpoint() 4288 } 4289 4290 // dolockOSThread is called by LockOSThread and lockOSThread below 4291 // after they modify m.locked. Do not allow preemption during this call, 4292 // or else the m might be different in this function than in the caller. 4293 //go:nosplit 4294 func dolockOSThread() { 4295 if GOARCH == "wasm" { 4296 return // no threads on wasm yet 4297 } 4298 _g_ := getg() 4299 _g_.m.lockedg.set(_g_) 4300 _g_.lockedm.set(_g_.m) 4301 } 4302 4303 //go:nosplit 4304 4305 // LockOSThread wires the calling goroutine to its current operating system thread. 4306 // The calling goroutine will always execute in that thread, 4307 // and no other goroutine will execute in it, 4308 // until the calling goroutine has made as many calls to 4309 // UnlockOSThread as to LockOSThread. 4310 // If the calling goroutine exits without unlocking the thread, 4311 // the thread will be terminated. 4312 // 4313 // All init functions are run on the startup thread. Calling LockOSThread 4314 // from an init function will cause the main function to be invoked on 4315 // that thread. 4316 // 4317 // A goroutine should call LockOSThread before calling OS services or 4318 // non-Go library functions that depend on per-thread state. 4319 func LockOSThread() { 4320 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { 4321 // If we need to start a new thread from the locked 4322 // thread, we need the template thread. Start it now 4323 // while we're in a known-good state. 4324 startTemplateThread() 4325 } 4326 _g_ := getg() 4327 _g_.m.lockedExt++ 4328 if _g_.m.lockedExt == 0 { 4329 _g_.m.lockedExt-- 4330 panic("LockOSThread nesting overflow") 4331 } 4332 dolockOSThread() 4333 } 4334 4335 //go:nosplit 4336 func lockOSThread() { 4337 getg().m.lockedInt++ 4338 dolockOSThread() 4339 } 4340 4341 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 4342 // after they update m->locked. Do not allow preemption during this call, 4343 // or else the m might be in different in this function than in the caller. 4344 //go:nosplit 4345 func dounlockOSThread() { 4346 if GOARCH == "wasm" { 4347 return // no threads on wasm yet 4348 } 4349 _g_ := getg() 4350 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { 4351 return 4352 } 4353 _g_.m.lockedg = 0 4354 _g_.lockedm = 0 4355 } 4356 4357 //go:nosplit 4358 4359 // UnlockOSThread undoes an earlier call to LockOSThread. 4360 // If this drops the number of active LockOSThread calls on the 4361 // calling goroutine to zero, it unwires the calling goroutine from 4362 // its fixed operating system thread. 4363 // If there are no active LockOSThread calls, this is a no-op. 
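dolockOSThread and LockOSThread above wire a goroutine to its M by setting m.lockedg and g.lockedm; lockedExt counts the nested public LockOSThread calls while lockedInt counts runtime-internal ones. A minimal hypothetical sketch of the intended user-level pattern for code that depends on per-thread state:

package main

import (
	"fmt"
	"runtime"
)

// callThreadSensitiveAPI stands in for anything that cares which OS thread
// it runs on (thread-local storage, some C libraries, certain OS calls).
func callThreadSensitiveAPI() {
	fmt.Println("running while pinned to one OS thread")
}

func worker(done chan<- struct{}) {
	// Pin this goroutine to its current OS thread for the duration.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread() // must balance the LockOSThread call

	callThreadSensitiveAPI()
	done <- struct{}{}
}

func main() {
	done := make(chan struct{})
	go worker(done)
	<-done
}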
4364 // 4365 // Before calling UnlockOSThread, the caller must ensure that the OS 4366 // thread is suitable for running other goroutines. If the caller made 4367 // any permanent changes to the state of the thread that would affect 4368 // other goroutines, it should not call this function and thus leave 4369 // the goroutine locked to the OS thread until the goroutine (and 4370 // hence the thread) exits. 4371 func UnlockOSThread() { 4372 _g_ := getg() 4373 if _g_.m.lockedExt == 0 { 4374 return 4375 } 4376 _g_.m.lockedExt-- 4377 dounlockOSThread() 4378 } 4379 4380 //go:nosplit 4381 func unlockOSThread() { 4382 _g_ := getg() 4383 if _g_.m.lockedInt == 0 { 4384 systemstack(badunlockosthread) 4385 } 4386 _g_.m.lockedInt-- 4387 dounlockOSThread() 4388 } 4389 4390 func badunlockosthread() { 4391 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 4392 } 4393 4394 func gcount() int32 { 4395 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) 4396 for _, _p_ := range allp { 4397 n -= _p_.gFree.n 4398 } 4399 4400 // All these variables can be changed concurrently, so the result can be inconsistent. 4401 // But at least the current goroutine is running. 4402 if n < 1 { 4403 n = 1 4404 } 4405 return n 4406 } 4407 4408 func mcount() int32 { 4409 return int32(sched.mnext - sched.nmfreed) 4410 } 4411 4412 var prof struct { 4413 signalLock uint32 4414 hz int32 4415 } 4416 4417 func _System() { _System() } 4418 func _ExternalCode() { _ExternalCode() } 4419 func _LostExternalCode() { _LostExternalCode() } 4420 func _GC() { _GC() } 4421 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 4422 func _VDSO() { _VDSO() } 4423 4424 // Called if we receive a SIGPROF signal. 4425 // Called by the signal handler, may run during STW. 4426 //go:nowritebarrierrec 4427 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 4428 if prof.hz == 0 { 4429 return 4430 } 4431 4432 // If mp.profilehz is 0, then profiling is not enabled for this thread. 4433 // We must check this to avoid a deadlock between setcpuprofilerate 4434 // and the call to cpuprof.add, below. 4435 if mp != nil && mp.profilehz == 0 { 4436 return 4437 } 4438 4439 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 4440 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 4441 // the critical section, it creates a deadlock (when writing the sample). 4442 // As a workaround, create a counter of SIGPROFs while in critical section 4443 // to store the count, and pass it to sigprof.add() later when SIGPROF is 4444 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 4445 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { 4446 if f := findfunc(pc); f.valid() { 4447 if hasPrefix(funcname(f), "runtime/internal/atomic") { 4448 cpuprof.lostAtomic++ 4449 return 4450 } 4451 } 4452 } 4453 4454 // Profiling runs concurrently with GC, so it must not allocate. 4455 // Set a trap in case the code does allocate. 4456 // Note that on windows, one thread takes profiles of all the 4457 // other threads, so mp is usually not getg().m. 4458 // In fact mp may not even be stopped. 4459 // See golang.org/issue/17165. 4460 getg().m.mallocing++ 4461 4462 // Define that a "user g" is a user-created goroutine, and a "system g" 4463 // is one that is m->g0 or m->gsignal. 4464 // 4465 // We might be interrupted for profiling halfway through a 4466 // goroutine switch. 
The switch involves updating three (or four) values: 4467 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 4468 // because once it gets updated the new g is running. 4469 // 4470 // When switching from a user g to a system g, LR is not considered live, 4471 // so the update only affects g, SP, and PC. Since PC must be last, there 4472 // the possible partial transitions in ordinary execution are (1) g alone is updated, 4473 // (2) both g and SP are updated, and (3) SP alone is updated. 4474 // If SP or g alone is updated, we can detect the partial transition by checking 4475 // whether the SP is within g's stack bounds. (We could also require that SP 4476 // be changed only after g, but the stack bounds check is needed by other 4477 // cases, so there is no need to impose an additional requirement.) 4478 // 4479 // There is one exceptional transition to a system g, not in ordinary execution. 4480 // When a signal arrives, the operating system starts the signal handler running 4481 // with an updated PC and SP. The g is updated last, at the beginning of the 4482 // handler. There are two reasons this is okay. First, until g is updated the 4483 // g and SP do not match, so the stack bounds check detects the partial transition. 4484 // Second, signal handlers currently run with signals disabled, so a profiling 4485 // signal cannot arrive during the handler. 4486 // 4487 // When switching from a system g to a user g, there are three possibilities. 4488 // 4489 // First, it may be that the g switch has no PC update, because the SP 4490 // either corresponds to a user g throughout (as in asmcgocall) 4491 // or because it has been arranged to look like a user g frame 4492 // (as in cgocallback). In this case, since the entire 4493 // transition is a g+SP update, a partial transition updating just one of 4494 // those will be detected by the stack bounds check. 4495 // 4496 // Second, when returning from a signal handler, the PC and SP updates 4497 // are performed by the operating system in an atomic update, so the g 4498 // update must be done before them. The stack bounds check detects 4499 // the partial transition here, and (again) signal handlers run with signals 4500 // disabled, so a profiling signal cannot arrive then anyway. 4501 // 4502 // Third, the common case: it may be that the switch updates g, SP, and PC 4503 // separately. If the PC is within any of the functions that does this, 4504 // we don't ask for a traceback. C.F. the function setsSP for more about this. 4505 // 4506 // There is another apparently viable approach, recorded here in case 4507 // the "PC within setsSP function" check turns out not to be usable. 4508 // It would be possible to delay the update of either g or SP until immediately 4509 // before the PC update instruction. Then, because of the stack bounds check, 4510 // the only problematic interrupt point is just before that PC update instruction, 4511 // and the sigprof handler can detect that instruction and simulate stepping past 4512 // it in order to reach a consistent state. On ARM, the update of g must be made 4513 // in two places (in R10 and also in a TLS slot), so the delayed update would 4514 // need to be the SP update. The sigprof handler must read the instruction at 4515 // the current PC and if it was the known instruction (for example, JMP BX or 4516 // MOV R2, PC), use that other register in place of the PC value. 
4517 // The biggest drawback to this solution is that it requires that we can tell 4518 // whether it's safe to read from the memory pointed at by PC. 4519 // In a correct program, we can test PC == nil and otherwise read, 4520 // but if a profiling signal happens at the instant that a program executes 4521 // a bad jump (before the program manages to handle the resulting fault) 4522 // the profiling handler could fault trying to read nonexistent memory. 4523 // 4524 // To recap, there are no constraints on the assembly being used for the 4525 // transition. We simply require that g and SP match and that the PC is not 4526 // in gogo. 4527 traceback := true 4528 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) { 4529 traceback = false 4530 } 4531 var stk [maxCPUProfStack]uintptr 4532 n := 0 4533 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 4534 cgoOff := 0 4535 // Check cgoCallersUse to make sure that we are not 4536 // interrupting other code that is fiddling with 4537 // cgoCallers. We are running in a signal handler 4538 // with all signals blocked, so we don't have to worry 4539 // about any other code interrupting us. 4540 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 4541 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 4542 cgoOff++ 4543 } 4544 copy(stk[:], mp.cgoCallers[:cgoOff]) 4545 mp.cgoCallers[0] = 0 4546 } 4547 4548 // Collect Go stack that leads to the cgo call. 4549 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 4550 if n > 0 { 4551 n += cgoOff 4552 } 4553 } else if traceback { 4554 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 4555 } 4556 4557 if n <= 0 { 4558 // Normal traceback is impossible or has failed. 4559 // See if it falls into several common cases. 4560 n = 0 4561 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 4562 // Libcall, i.e. runtime syscall on windows. 4563 // Collect Go stack that leads to the call. 4564 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 4565 } 4566 if n == 0 && mp != nil && mp.vdsoSP != 0 { 4567 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 4568 } 4569 if n == 0 { 4570 // If all of the above has failed, account it against abstract "System" or "GC". 4571 n = 2 4572 if inVDSOPage(pc) { 4573 pc = funcPC(_VDSO) + sys.PCQuantum 4574 } else if pc > firstmoduledata.etext { 4575 // "ExternalCode" is better than "etext". 4576 pc = funcPC(_ExternalCode) + sys.PCQuantum 4577 } 4578 stk[0] = pc 4579 if mp.preemptoff != "" { 4580 stk[1] = funcPC(_GC) + sys.PCQuantum 4581 } else { 4582 stk[1] = funcPC(_System) + sys.PCQuantum 4583 } 4584 } 4585 } 4586 4587 if prof.hz != 0 { 4588 cpuprof.add(gp, stk[:n]) 4589 } 4590 getg().m.mallocing-- 4591 } 4592 4593 // If the signal handler receives a SIGPROF signal on a non-Go thread, 4594 // it tries to collect a traceback into sigprofCallers. 4595 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 4596 var sigprofCallers cgoCallers 4597 var sigprofCallersUse uint32 4598 4599 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 4600 // and the signal handler collected a stack trace in sigprofCallers. 
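sigprof above is the sampling half of CPU profiling; it only fires once a profiling rate has been installed through setcpuprofilerate (below), which user code reaches via runtime/pprof or runtime.SetCPUProfileRate. A small hypothetical program, standard-library APIs only, that turns the sampler on:

package main

import (
	"os"
	"runtime/pprof"
)

func busy() {
	sum := 0
	for i := 0; i < 100_000_000; i++ {
		sum += i
	}
	_ = sum
}

func main() {
	f, err := os.Create("cpu.pprof")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// StartCPUProfile installs the profiling rate (typically 100 Hz); from
	// then on every SIGPROF lands in sigprof, which records the stack of
	// whatever code the signal happened to interrupt.
	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	defer pprof.StopCPUProfile()

	busy()
}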
4601 // When this is called, sigprofCallersUse will be non-zero. 4602 // g is nil, and what we can do is very limited. 4603 //go:nosplit 4604 //go:nowritebarrierrec 4605 func sigprofNonGo() { 4606 if prof.hz != 0 { 4607 n := 0 4608 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 4609 n++ 4610 } 4611 cpuprof.addNonGo(sigprofCallers[:n]) 4612 } 4613 4614 atomic.Store(&sigprofCallersUse, 0) 4615 } 4616 4617 // sigprofNonGoPC is called when a profiling signal arrived on a 4618 // non-Go thread and we have a single PC value, not a stack trace. 4619 // g is nil, and what we can do is very limited. 4620 //go:nosplit 4621 //go:nowritebarrierrec 4622 func sigprofNonGoPC(pc uintptr) { 4623 if prof.hz != 0 { 4624 stk := []uintptr{ 4625 pc, 4626 funcPC(_ExternalCode) + sys.PCQuantum, 4627 } 4628 cpuprof.addNonGo(stk) 4629 } 4630 } 4631 4632 // Reports whether a function will set the SP 4633 // to an absolute value. Important that 4634 // we don't traceback when these are at the bottom 4635 // of the stack since we can't be sure that we will 4636 // find the caller. 4637 // 4638 // If the function is not on the bottom of the stack 4639 // we assume that it will have set it up so that traceback will be consistent, 4640 // either by being a traceback terminating function 4641 // or putting one on the stack at the right offset. 4642 func setsSP(pc uintptr) bool { 4643 f := findfunc(pc) 4644 if !f.valid() { 4645 // couldn't find the function for this PC, 4646 // so assume the worst and stop traceback 4647 return true 4648 } 4649 switch f.funcID { 4650 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack: 4651 return true 4652 } 4653 return false 4654 } 4655 4656 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 4657 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 4658 func setcpuprofilerate(hz int32) { 4659 // Force sane arguments. 4660 if hz < 0 { 4661 hz = 0 4662 } 4663 4664 // Disable preemption, otherwise we can be rescheduled to another thread 4665 // that has profiling enabled. 4666 _g_ := getg() 4667 _g_.m.locks++ 4668 4669 // Stop profiler on this thread so that it is safe to lock prof. 4670 // if a profiling signal came in while we had prof locked, 4671 // it would deadlock. 4672 setThreadCPUProfiler(0) 4673 4674 for !atomic.Cas(&prof.signalLock, 0, 1) { 4675 osyield() 4676 } 4677 if prof.hz != hz { 4678 setProcessCPUProfiler(hz) 4679 prof.hz = hz 4680 } 4681 atomic.Store(&prof.signalLock, 0) 4682 4683 lock(&sched.lock) 4684 sched.profilehz = hz 4685 unlock(&sched.lock) 4686 4687 if hz != 0 { 4688 setThreadCPUProfiler(hz) 4689 } 4690 4691 _g_.m.locks-- 4692 } 4693 4694 // init initializes pp, which may be a freshly allocated p or a 4695 // previously destroyed p, and transitions it to status _Pgcstop. 4696 func (pp *p) init(id int32) { 4697 pp.id = id 4698 pp.status = _Pgcstop 4699 pp.sudogcache = pp.sudogbuf[:0] 4700 for i := range pp.deferpool { 4701 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 4702 } 4703 pp.wbBuf.reset() 4704 if pp.mcache == nil { 4705 if id == 0 { 4706 if mcache0 == nil { 4707 throw("missing mcache?") 4708 } 4709 // Use the bootstrap mcache0. Only one P will get 4710 // mcache0: the one with ID 0. 
4711 pp.mcache = mcache0 4712 } else { 4713 pp.mcache = allocmcache() 4714 } 4715 } 4716 if raceenabled && pp.raceprocctx == 0 { 4717 if id == 0 { 4718 pp.raceprocctx = raceprocctx0 4719 raceprocctx0 = 0 // bootstrap 4720 } else { 4721 pp.raceprocctx = raceproccreate() 4722 } 4723 } 4724 lockInit(&pp.timersLock, lockRankTimers) 4725 4726 // This P may get timers when it starts running. Set the mask here 4727 // since the P may not go through pidleget (notably P 0 on startup). 4728 timerpMask.set(id) 4729 // Similarly, we may not go through pidleget before this P starts 4730 // running if it is P 0 on startup. 4731 idlepMask.clear(id) 4732 } 4733 4734 // destroy releases all of the resources associated with pp and 4735 // transitions it to status _Pdead. 4736 // 4737 // sched.lock must be held and the world must be stopped. 4738 func (pp *p) destroy() { 4739 assertLockHeld(&sched.lock) 4740 assertWorldStopped() 4741 4742 // Move all runnable goroutines to the global queue 4743 for pp.runqhead != pp.runqtail { 4744 // Pop from tail of local queue 4745 pp.runqtail-- 4746 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr() 4747 // Push onto head of global queue 4748 globrunqputhead(gp) 4749 } 4750 if pp.runnext != 0 { 4751 globrunqputhead(pp.runnext.ptr()) 4752 pp.runnext = 0 4753 } 4754 if len(pp.timers) > 0 { 4755 plocal := getg().m.p.ptr() 4756 // The world is stopped, but we acquire timersLock to 4757 // protect against sysmon calling timeSleepUntil. 4758 // This is the only case where we hold the timersLock of 4759 // more than one P, so there are no deadlock concerns. 4760 lock(&plocal.timersLock) 4761 lock(&pp.timersLock) 4762 moveTimers(plocal, pp.timers) 4763 pp.timers = nil 4764 pp.numTimers = 0 4765 pp.adjustTimers = 0 4766 pp.deletedTimers = 0 4767 atomic.Store64(&pp.timer0When, 0) 4768 unlock(&pp.timersLock) 4769 unlock(&plocal.timersLock) 4770 } 4771 // Flush p's write barrier buffer. 4772 if gcphase != _GCoff { 4773 wbBufFlush1(pp) 4774 pp.gcw.dispose() 4775 } 4776 for i := range pp.sudogbuf { 4777 pp.sudogbuf[i] = nil 4778 } 4779 pp.sudogcache = pp.sudogbuf[:0] 4780 for i := range pp.deferpool { 4781 for j := range pp.deferpoolbuf[i] { 4782 pp.deferpoolbuf[i][j] = nil 4783 } 4784 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 4785 } 4786 systemstack(func() { 4787 for i := 0; i < pp.mspancache.len; i++ { 4788 // Safe to call since the world is stopped. 4789 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i])) 4790 } 4791 pp.mspancache.len = 0 4792 lock(&mheap_.lock) 4793 pp.pcache.flush(&mheap_.pages) 4794 unlock(&mheap_.lock) 4795 }) 4796 freemcache(pp.mcache) 4797 pp.mcache = nil 4798 gfpurge(pp) 4799 traceProcFree(pp) 4800 if raceenabled { 4801 if pp.timerRaceCtx != 0 { 4802 // The race detector code uses a callback to fetch 4803 // the proc context, so arrange for that callback 4804 // to see the right thing. 4805 // This hack only works because we are the only 4806 // thread running. 4807 mp := getg().m 4808 phold := mp.p.ptr() 4809 mp.p.set(pp) 4810 4811 racectxend(pp.timerRaceCtx) 4812 pp.timerRaceCtx = 0 4813 4814 mp.p.set(phold) 4815 } 4816 raceprocdestroy(pp.raceprocctx) 4817 pp.raceprocctx = 0 4818 } 4819 pp.gcAssistTime = 0 4820 pp.status = _Pdead 4821 } 4822 4823 // Change number of processors. 4824 // 4825 // sched.lock must be held, and the world must be stopped. 4826 // 4827 // gcworkbufs must not be being modified by either the GC or the write barrier 4828 // code, so the GC must not be running if the number of Ps actually changes. 
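procresize below is what actually grows or shrinks the set of Ps; outside of program startup it runs with the world stopped, most commonly on behalf of runtime.GOMAXPROCS. A tiny hypothetical sketch of querying and changing the setting from user code:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// GOMAXPROCS(n) with n < 1 only reports the current setting.
	fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0))
	fmt.Println("logical CPUs:", runtime.NumCPU())

	// Setting a new value stops the world and reaches procresize under
	// sched.lock; the previous setting is returned.
	prev := runtime.GOMAXPROCS(2)
	fmt.Println("previous setting:", prev)
}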
4829 // 4830 // Returns list of Ps with local work, they need to be scheduled by the caller. 4831 func procresize(nprocs int32) *p { 4832 assertLockHeld(&sched.lock) 4833 assertWorldStopped() 4834 4835 old := gomaxprocs 4836 if old < 0 || nprocs <= 0 { 4837 throw("procresize: invalid arg") 4838 } 4839 if trace.enabled { 4840 traceGomaxprocs(nprocs) 4841 } 4842 4843 // update statistics 4844 now := nanotime() 4845 if sched.procresizetime != 0 { 4846 sched.totaltime += int64(old) * (now - sched.procresizetime) 4847 } 4848 sched.procresizetime = now 4849 4850 maskWords := (nprocs + 31) / 32 4851 4852 // Grow allp if necessary. 4853 if nprocs > int32(len(allp)) { 4854 // Synchronize with retake, which could be running 4855 // concurrently since it doesn't run on a P. 4856 lock(&allpLock) 4857 if nprocs <= int32(cap(allp)) { 4858 allp = allp[:nprocs] 4859 } else { 4860 nallp := make([]*p, nprocs) 4861 // Copy everything up to allp's cap so we 4862 // never lose old allocated Ps. 4863 copy(nallp, allp[:cap(allp)]) 4864 allp = nallp 4865 } 4866 4867 if maskWords <= int32(cap(idlepMask)) { 4868 idlepMask = idlepMask[:maskWords] 4869 timerpMask = timerpMask[:maskWords] 4870 } else { 4871 nidlepMask := make([]uint32, maskWords) 4872 // No need to copy beyond len, old Ps are irrelevant. 4873 copy(nidlepMask, idlepMask) 4874 idlepMask = nidlepMask 4875 4876 ntimerpMask := make([]uint32, maskWords) 4877 copy(ntimerpMask, timerpMask) 4878 timerpMask = ntimerpMask 4879 } 4880 unlock(&allpLock) 4881 } 4882 4883 // initialize new P's 4884 for i := old; i < nprocs; i++ { 4885 pp := allp[i] 4886 if pp == nil { 4887 pp = new(p) 4888 } 4889 pp.init(i) 4890 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 4891 } 4892 4893 _g_ := getg() 4894 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4895 // continue to use the current P 4896 _g_.m.p.ptr().status = _Prunning 4897 _g_.m.p.ptr().mcache.prepareForSweep() 4898 } else { 4899 // release the current P and acquire allp[0]. 4900 // 4901 // We must do this before destroying our current P 4902 // because p.destroy itself has write barriers, so we 4903 // need to do that from a valid P. 4904 if _g_.m.p != 0 { 4905 if trace.enabled { 4906 // Pretend that we were descheduled 4907 // and then scheduled again to keep 4908 // the trace sane. 4909 traceGoSched() 4910 traceProcStop(_g_.m.p.ptr()) 4911 } 4912 _g_.m.p.ptr().m = 0 4913 } 4914 _g_.m.p = 0 4915 p := allp[0] 4916 p.m = 0 4917 p.status = _Pidle 4918 acquirep(p) 4919 if trace.enabled { 4920 traceGoStart() 4921 } 4922 } 4923 4924 // g.m.p is now set, so we no longer need mcache0 for bootstrapping. 4925 mcache0 = nil 4926 4927 // release resources from unused P's 4928 for i := nprocs; i < old; i++ { 4929 p := allp[i] 4930 p.destroy() 4931 // can't free P itself because it can be referenced by an M in syscall 4932 } 4933 4934 // Trim allp. 
4935 if int32(len(allp)) != nprocs { 4936 lock(&allpLock) 4937 allp = allp[:nprocs] 4938 idlepMask = idlepMask[:maskWords] 4939 timerpMask = timerpMask[:maskWords] 4940 unlock(&allpLock) 4941 } 4942 4943 var runnablePs *p 4944 for i := nprocs - 1; i >= 0; i-- { 4945 p := allp[i] 4946 if _g_.m.p.ptr() == p { 4947 continue 4948 } 4949 p.status = _Pidle 4950 if runqempty(p) { 4951 pidleput(p) 4952 } else { 4953 p.m.set(mget()) 4954 p.link.set(runnablePs) 4955 runnablePs = p 4956 } 4957 } 4958 stealOrder.reset(uint32(nprocs)) 4959 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 4960 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 4961 return runnablePs 4962 } 4963 4964 // Associate p and the current m. 4965 // 4966 // This function is allowed to have write barriers even if the caller 4967 // isn't because it immediately acquires _p_. 4968 // 4969 //go:yeswritebarrierrec 4970 func acquirep(_p_ *p) { 4971 // Do the part that isn't allowed to have write barriers. 4972 wirep(_p_) 4973 4974 // Have p; write barriers now allowed. 4975 4976 // Perform deferred mcache flush before this P can allocate 4977 // from a potentially stale mcache. 4978 _p_.mcache.prepareForSweep() 4979 4980 if trace.enabled { 4981 traceProcStart() 4982 } 4983 } 4984 4985 // wirep is the first step of acquirep, which actually associates the 4986 // current M to _p_. This is broken out so we can disallow write 4987 // barriers for this part, since we don't yet have a P. 4988 // 4989 //go:nowritebarrierrec 4990 //go:nosplit 4991 func wirep(_p_ *p) { 4992 _g_ := getg() 4993 4994 if _g_.m.p != 0 { 4995 throw("wirep: already in go") 4996 } 4997 if _p_.m != 0 || _p_.status != _Pidle { 4998 id := int64(0) 4999 if _p_.m != 0 { 5000 id = _p_.m.ptr().id 5001 } 5002 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 5003 throw("wirep: invalid p state") 5004 } 5005 _g_.m.p.set(_p_) 5006 _p_.m.set(_g_.m) 5007 _p_.status = _Prunning 5008 } 5009 5010 // Disassociate p and the current m. 5011 func releasep() *p { 5012 _g_ := getg() 5013 5014 if _g_.m.p == 0 { 5015 throw("releasep: invalid arg") 5016 } 5017 _p_ := _g_.m.p.ptr() 5018 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning { 5019 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n") 5020 throw("releasep: invalid p state") 5021 } 5022 if trace.enabled { 5023 traceProcStop(_g_.m.p.ptr()) 5024 } 5025 _g_.m.p = 0 5026 _p_.m = 0 5027 _p_.status = _Pidle 5028 return _p_ 5029 } 5030 5031 func incidlelocked(v int32) { 5032 lock(&sched.lock) 5033 sched.nmidlelocked += v 5034 if v > 0 { 5035 checkdead() 5036 } 5037 unlock(&sched.lock) 5038 } 5039 5040 // Check for deadlock situation. 5041 // The check is based on number of running M's, if 0 -> deadlock. 5042 // sched.lock must be held. 5043 func checkdead() { 5044 assertLockHeld(&sched.lock) 5045 5046 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 5047 // there are no running goroutines. The calling program is 5048 // assumed to be running. 5049 if islibrary || isarchive { 5050 return 5051 } 5052 5053 // If we are dying because of a signal caught on an already idle thread, 5054 // freezetheworld will cause all running threads to block. 5055 // And runtime will essentially enter into deadlock state, 5056 // except that there is a thread that will call exit soon. 5057 if panicking > 0 { 5058 return 5059 } 5060 5061 // If we are not running under cgo, but we have an extra M then account 5062 // for it. 
(It is possible to have an extra M on Windows without cgo to 5063 // accommodate callbacks created by syscall.NewCallback. See issue #6751 5064 // for details.) 5065 var run0 int32 5066 if !iscgo && cgoHasExtraM { 5067 mp := lockextra(true) 5068 haveExtraM := extraMCount > 0 5069 unlockextra(mp) 5070 if haveExtraM { 5071 run0 = 1 5072 } 5073 } 5074 5075 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 5076 if run > run0 { 5077 return 5078 } 5079 if run < 0 { 5080 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 5081 throw("checkdead: inconsistent counts") 5082 } 5083 5084 grunning := 0 5085 lock(&allglock) 5086 for i := 0; i < len(allgs); i++ { 5087 gp := allgs[i] 5088 if isSystemGoroutine(gp, false) { 5089 continue 5090 } 5091 s := readgstatus(gp) 5092 switch s &^ _Gscan { 5093 case _Gwaiting, 5094 _Gpreempted: 5095 grunning++ 5096 case _Grunnable, 5097 _Grunning, 5098 _Gsyscall: 5099 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 5100 throw("checkdead: runnable g") 5101 } 5102 } 5103 unlock(&allglock) 5104 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 5105 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 5106 throw("no goroutines (main called runtime.Goexit) - deadlock!") 5107 } 5108 5109 // Maybe jump time forward for playground. 5110 if faketime != 0 { 5111 when, _p_ := timeSleepUntil() 5112 if _p_ != nil { 5113 faketime = when 5114 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { 5115 if (*pp).ptr() == _p_ { 5116 *pp = _p_.link 5117 break 5118 } 5119 } 5120 mp := mget() 5121 if mp == nil { 5122 // There should always be a free M since 5123 // nothing is running. 5124 throw("checkdead: no m for timer") 5125 } 5126 mp.nextp.set(_p_) 5127 notewakeup(&mp.park) 5128 return 5129 } 5130 } 5131 5132 // There are no goroutines running, so we can look at the P's. 5133 for _, _p_ := range allp { 5134 if len(_p_.timers) > 0 { 5135 return 5136 } 5137 } 5138 5139 getg().m.throwing = -1 // do not dump full stacks 5140 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 5141 throw("all goroutines are asleep - deadlock!") 5142 } 5143 5144 // forcegcperiod is the maximum time in nanoseconds between garbage 5145 // collections. If we go this long without a garbage collection, one 5146 // is forced to run. 5147 // 5148 // This is a variable for testing purposes. It normally doesn't change. 5149 var forcegcperiod int64 = 2 * 60 * 1e9 5150 5151 // Always runs without a P, so write barriers are not allowed. 5152 // 5153 //go:nowritebarrierrec 5154 func sysmon() { 5155 lock(&sched.lock) 5156 sched.nmsys++ 5157 checkdead() 5158 unlock(&sched.lock) 5159 5160 // For syscall_runtime_doAllThreadsSyscall, sysmon is 5161 // sufficiently up to participate in fixups. 5162 atomic.Store(&sched.sysmonStarting, 0) 5163 5164 lasttrace := int64(0) 5165 idle := 0 // how many cycles in succession we had not wokeup somebody 5166 delay := uint32(0) 5167 5168 for { 5169 if idle == 0 { // start with 20us sleep... 5170 delay = 20 5171 } else if idle > 50 { // start doubling the sleep after 1ms... 5172 delay *= 2 5173 } 5174 if delay > 10*1000 { // up to 10ms 5175 delay = 10 * 1000 5176 } 5177 usleep(delay) 5178 mDoFixup() 5179 5180 // sysmon should not enter deep sleep if schedtrace is enabled so that 5181 // it can print that information at the right time. 
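checkdead above is where the familiar fatal errors quoted in it come from: if no M is running user code and no timer can ever fire, the runtime aborts instead of hanging silently. A minimal hypothetical program that trips the "all goroutines are asleep" path:

package main

func main() {
	ch := make(chan int)
	// Nothing ever sends on ch. Once main blocks here there are no
	// runnable goroutines and no pending timers, so checkdead reports:
	//   fatal error: all goroutines are asleep - deadlock!
	<-ch
}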
5182 // 5183 // It should also not enter deep sleep if there are any active P's so 5184 // that it can retake P's from syscalls, preempt long running G's, and 5185 // poll the network if all P's are busy for long stretches. 5186 // 5187 // It should wakeup from deep sleep if any P's become active either due 5188 // to exiting a syscall or waking up due to a timer expiring so that it 5189 // can resume performing those duties. If it wakes from a syscall it 5190 // resets idle and delay as a bet that since it had retaken a P from a 5191 // syscall before, it may need to do it again shortly after the 5192 // application starts work again. It does not reset idle when waking 5193 // from a timer to avoid adding system load to applications that spend 5194 // most of their time sleeping. 5195 now := nanotime() 5196 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 5197 lock(&sched.lock) 5198 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 5199 syscallWake := false 5200 next, _ := timeSleepUntil() 5201 if next > now { 5202 atomic.Store(&sched.sysmonwait, 1) 5203 unlock(&sched.lock) 5204 // Make wake-up period small enough 5205 // for the sampling to be correct. 5206 sleep := forcegcperiod / 2 5207 if next-now < sleep { 5208 sleep = next - now 5209 } 5210 shouldRelax := sleep >= osRelaxMinNS 5211 if shouldRelax { 5212 osRelax(true) 5213 } 5214 syscallWake = notetsleep(&sched.sysmonnote, sleep) 5215 mDoFixup() 5216 if shouldRelax { 5217 osRelax(false) 5218 } 5219 lock(&sched.lock) 5220 atomic.Store(&sched.sysmonwait, 0) 5221 noteclear(&sched.sysmonnote) 5222 } 5223 if syscallWake { 5224 idle = 0 5225 delay = 20 5226 } 5227 } 5228 unlock(&sched.lock) 5229 } 5230 5231 lock(&sched.sysmonlock) 5232 // Update now in case we blocked on sysmonnote or spent a long time 5233 // blocked on schedlock or sysmonlock above. 5234 now = nanotime() 5235 5236 // trigger libc interceptors if needed 5237 if *cgo_yield != nil { 5238 asmcgocall(*cgo_yield, nil) 5239 } 5240 // poll network if not polled for more than 10ms 5241 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 5242 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 5243 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 5244 list := netpoll(0) // non-blocking - returns list of goroutines 5245 if !list.empty() { 5246 // Need to decrement number of idle locked M's 5247 // (pretending that one more is running) before injectglist. 5248 // Otherwise it can lead to the following situation: 5249 // injectglist grabs all P's but before it starts M's to run the P's, 5250 // another M returns from syscall, finishes running its G, 5251 // observes that there is no work to do and no other running M's 5252 // and reports deadlock. 5253 incidlelocked(-1) 5254 injectglist(&list) 5255 incidlelocked(1) 5256 } 5257 } 5258 mDoFixup() 5259 if GOOS == "netbsd" { 5260 // netpoll is responsible for waiting for timer 5261 // expiration, so we typically don't have to worry 5262 // about starting an M to service timers. (Note that 5263 // sleep for timeSleepUntil above simply ensures sysmon 5264 // starts running again when that timer expiration may 5265 // cause Go code to run again). 5266 // 5267 // However, netbsd has a kernel bug that sometimes 5268 // misses netpollBreak wake-ups, which can lead to 5269 // unbounded delays servicing timers. If we detect this 5270 // overrun, then startm to get something to handle the 5271 // timer. 
5272 // 5273 // See issue 42515 and 5274 // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094. 5275 if next, _ := timeSleepUntil(); next < now { 5276 startm(nil, false) 5277 } 5278 } 5279 if atomic.Load(&scavenge.sysmonWake) != 0 { 5280 // Kick the scavenger awake if someone requested it. 5281 wakeScavenger() 5282 } 5283 // retake P's blocked in syscalls 5284 // and preempt long running G's 5285 if retake(now) != 0 { 5286 idle = 0 5287 } else { 5288 idle++ 5289 } 5290 // check if we need to force a GC 5291 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 5292 lock(&forcegc.lock) 5293 forcegc.idle = 0 5294 var list gList 5295 list.push(forcegc.g) 5296 injectglist(&list) 5297 unlock(&forcegc.lock) 5298 } 5299 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 5300 lasttrace = now 5301 schedtrace(debug.scheddetail > 0) 5302 } 5303 unlock(&sched.sysmonlock) 5304 } 5305 } 5306 5307 type sysmontick struct { 5308 schedtick uint32 5309 schedwhen int64 5310 syscalltick uint32 5311 syscallwhen int64 5312 } 5313 5314 // forcePreemptNS is the time slice given to a G before it is 5315 // preempted. 5316 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 5317 5318 func retake(now int64) uint32 { 5319 n := 0 5320 // Prevent allp slice changes. This lock will be completely 5321 // uncontended unless we're already stopping the world. 5322 lock(&allpLock) 5323 // We can't use a range loop over allp because we may 5324 // temporarily drop the allpLock. Hence, we need to re-fetch 5325 // allp each time around the loop. 5326 for i := 0; i < len(allp); i++ { 5327 _p_ := allp[i] 5328 if _p_ == nil { 5329 // This can happen if procresize has grown 5330 // allp but not yet created new Ps. 5331 continue 5332 } 5333 pd := &_p_.sysmontick 5334 s := _p_.status 5335 sysretake := false 5336 if s == _Prunning || s == _Psyscall { 5337 // Preempt G if it's running for too long. 5338 t := int64(_p_.schedtick) 5339 if int64(pd.schedtick) != t { 5340 pd.schedtick = uint32(t) 5341 pd.schedwhen = now 5342 } else if pd.schedwhen+forcePreemptNS <= now { 5343 preemptone(_p_) 5344 // In case of syscall, preemptone() doesn't 5345 // work, because there is no M wired to P. 5346 sysretake = true 5347 } 5348 } 5349 if s == _Psyscall { 5350 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 5351 t := int64(_p_.syscalltick) 5352 if !sysretake && int64(pd.syscalltick) != t { 5353 pd.syscalltick = uint32(t) 5354 pd.syscallwhen = now 5355 continue 5356 } 5357 // On the one hand we don't want to retake Ps if there is no other work to do, 5358 // but on the other hand we want to retake them eventually 5359 // because they can prevent the sysmon thread from deep sleep. 5360 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 5361 continue 5362 } 5363 // Drop allpLock so we can take sched.lock. 5364 unlock(&allpLock) 5365 // Need to decrement number of idle locked M's 5366 // (pretending that one more is running) before the CAS. 5367 // Otherwise the M from which we retake can exit the syscall, 5368 // increment nmidle and report deadlock. 
5369 incidlelocked(-1)
5370 if atomic.Cas(&_p_.status, s, _Pidle) {
5371 if trace.enabled {
5372 traceGoSysBlock(_p_)
5373 traceProcStop(_p_)
5374 }
5375 n++
5376 _p_.syscalltick++
5377 handoffp(_p_)
5378 }
5379 incidlelocked(1)
5380 lock(&allpLock)
5381 }
5382 }
5383 unlock(&allpLock)
5384 return uint32(n)
5385 }
5386
5387 // Tell all goroutines that they have been preempted and they should stop.
5388 // This function is purely best-effort. It can fail to inform a goroutine if a
5389 // processor just started running it.
5390 // No locks need to be held.
5391 // Returns true if preemption request was issued to at least one goroutine.
5392 func preemptall() bool {
5393 res := false
5394 for _, _p_ := range allp {
5395 if _p_.status != _Prunning {
5396 continue
5397 }
5398 if preemptone(_p_) {
5399 res = true
5400 }
5401 }
5402 return res
5403 }
5404
5405 // Tell the goroutine running on processor P to stop.
5406 // This function is purely best-effort. It can incorrectly fail to inform the
5407 // goroutine. It can inform the wrong goroutine. Even if it informs the
5408 // correct goroutine, that goroutine might ignore the request if it is
5409 // simultaneously executing newstack.
5410 // No lock needs to be held.
5411 // Returns true if preemption request was issued.
5412 // The actual preemption will happen at some point in the future
5413 // and will be indicated by the gp->status no longer being
5414 // Grunning.
5415 func preemptone(_p_ *p) bool {
5416 mp := _p_.m.ptr()
5417 if mp == nil || mp == getg().m {
5418 return false
5419 }
5420 gp := mp.curg
5421 if gp == nil || gp == mp.g0 {
5422 return false
5423 }
5424
5425 gp.preempt = true
5426
5427 // Every call in a goroutine checks for stack overflow by
5428 // comparing the current stack pointer to gp->stackguard0.
5429 // Setting gp->stackguard0 to StackPreempt folds
5430 // preemption into the normal stack overflow check.
5431 gp.stackguard0 = stackPreempt
5432
5433 // Request an async preemption of this P.
5434 if preemptMSupported && debug.asyncpreemptoff == 0 {
5435 _p_.preempt = true
5436 preemptM(mp)
5437 }
5438
5439 return true
5440 }
5441
5442 var starttime int64
5443
5444 func schedtrace(detailed bool) {
5445 now := nanotime()
5446 if starttime == 0 {
5447 starttime = now
5448 }
5449
5450 lock(&sched.lock)
5451 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
5452 if detailed {
5453 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
5454 }
5455 // We must be careful while reading data from P's, M's and G's.
5456 // Even if we hold schedlock, most data can be changed concurrently.
5457 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
5458 for i, _p_ := range allp { 5459 mp := _p_.m.ptr() 5460 h := atomic.Load(&_p_.runqhead) 5461 t := atomic.Load(&_p_.runqtail) 5462 if detailed { 5463 id := int64(-1) 5464 if mp != nil { 5465 id = mp.id 5466 } 5467 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n") 5468 } else { 5469 // In non-detailed mode format lengths of per-P run queues as: 5470 // [len1 len2 len3 len4] 5471 print(" ") 5472 if i == 0 { 5473 print("[") 5474 } 5475 print(t - h) 5476 if i == len(allp)-1 { 5477 print("]\n") 5478 } 5479 } 5480 } 5481 5482 if !detailed { 5483 unlock(&sched.lock) 5484 return 5485 } 5486 5487 for mp := allm; mp != nil; mp = mp.alllink { 5488 _p_ := mp.p.ptr() 5489 gp := mp.curg 5490 lockedg := mp.lockedg.ptr() 5491 id1 := int32(-1) 5492 if _p_ != nil { 5493 id1 = _p_.id 5494 } 5495 id2 := int64(-1) 5496 if gp != nil { 5497 id2 = gp.goid 5498 } 5499 id3 := int64(-1) 5500 if lockedg != nil { 5501 id3 = lockedg.goid 5502 } 5503 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 5504 } 5505 5506 lock(&allglock) 5507 for gi := 0; gi < len(allgs); gi++ { 5508 gp := allgs[gi] 5509 mp := gp.m 5510 lockedm := gp.lockedm.ptr() 5511 id1 := int64(-1) 5512 if mp != nil { 5513 id1 = mp.id 5514 } 5515 id2 := int64(-1) 5516 if lockedm != nil { 5517 id2 = lockedm.id 5518 } 5519 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") 5520 } 5521 unlock(&allglock) 5522 unlock(&sched.lock) 5523 } 5524 5525 // schedEnableUser enables or disables the scheduling of user 5526 // goroutines. 5527 // 5528 // This does not stop already running user goroutines, so the caller 5529 // should first stop the world when disabling user goroutines. 5530 func schedEnableUser(enable bool) { 5531 lock(&sched.lock) 5532 if sched.disable.user == !enable { 5533 unlock(&sched.lock) 5534 return 5535 } 5536 sched.disable.user = !enable 5537 if enable { 5538 n := sched.disable.n 5539 sched.disable.n = 0 5540 globrunqputbatch(&sched.disable.runnable, n) 5541 unlock(&sched.lock) 5542 for ; n != 0 && sched.npidle != 0; n-- { 5543 startm(nil, false) 5544 } 5545 } else { 5546 unlock(&sched.lock) 5547 } 5548 } 5549 5550 // schedEnabled reports whether gp should be scheduled. It returns 5551 // false is scheduling of gp is disabled. 5552 // 5553 // sched.lock must be held. 5554 func schedEnabled(gp *g) bool { 5555 assertLockHeld(&sched.lock) 5556 5557 if sched.disable.user { 5558 return isSystemGoroutine(gp, true) 5559 } 5560 return true 5561 } 5562 5563 // Put mp on midle list. 5564 // sched.lock must be held. 5565 // May run during STW, so write barriers are not allowed. 5566 //go:nowritebarrierrec 5567 func mput(mp *m) { 5568 assertLockHeld(&sched.lock) 5569 5570 mp.schedlink = sched.midle 5571 sched.midle.set(mp) 5572 sched.nmidle++ 5573 checkdead() 5574 } 5575 5576 // Try to get an m from midle list. 5577 // sched.lock must be held. 5578 // May run during STW, so write barriers are not allowed. 
5579 //go:nowritebarrierrec 5580 func mget() *m { 5581 assertLockHeld(&sched.lock) 5582 5583 mp := sched.midle.ptr() 5584 if mp != nil { 5585 sched.midle = mp.schedlink 5586 sched.nmidle-- 5587 } 5588 return mp 5589 } 5590 5591 // Put gp on the global runnable queue. 5592 // sched.lock must be held. 5593 // May run during STW, so write barriers are not allowed. 5594 //go:nowritebarrierrec 5595 func globrunqput(gp *g) { 5596 assertLockHeld(&sched.lock) 5597 5598 sched.runq.pushBack(gp) 5599 sched.runqsize++ 5600 } 5601 5602 // Put gp at the head of the global runnable queue. 5603 // sched.lock must be held. 5604 // May run during STW, so write barriers are not allowed. 5605 //go:nowritebarrierrec 5606 func globrunqputhead(gp *g) { 5607 assertLockHeld(&sched.lock) 5608 5609 sched.runq.push(gp) 5610 sched.runqsize++ 5611 } 5612 5613 // Put a batch of runnable goroutines on the global runnable queue. 5614 // This clears *batch. 5615 // sched.lock must be held. 5616 func globrunqputbatch(batch *gQueue, n int32) { 5617 assertLockHeld(&sched.lock) 5618 5619 sched.runq.pushBackAll(*batch) 5620 sched.runqsize += n 5621 *batch = gQueue{} 5622 } 5623 5624 // Try get a batch of G's from the global runnable queue. 5625 // sched.lock must be held. 5626 func globrunqget(_p_ *p, max int32) *g { 5627 assertLockHeld(&sched.lock) 5628 5629 if sched.runqsize == 0 { 5630 return nil 5631 } 5632 5633 n := sched.runqsize/gomaxprocs + 1 5634 if n > sched.runqsize { 5635 n = sched.runqsize 5636 } 5637 if max > 0 && n > max { 5638 n = max 5639 } 5640 if n > int32(len(_p_.runq))/2 { 5641 n = int32(len(_p_.runq)) / 2 5642 } 5643 5644 sched.runqsize -= n 5645 5646 gp := sched.runq.pop() 5647 n-- 5648 for ; n > 0; n-- { 5649 gp1 := sched.runq.pop() 5650 runqput(_p_, gp1, false) 5651 } 5652 return gp 5653 } 5654 5655 // pMask is an atomic bitstring with one bit per P. 5656 type pMask []uint32 5657 5658 // read returns true if P id's bit is set. 5659 func (p pMask) read(id uint32) bool { 5660 word := id / 32 5661 mask := uint32(1) << (id % 32) 5662 return (atomic.Load(&p[word]) & mask) != 0 5663 } 5664 5665 // set sets P id's bit. 5666 func (p pMask) set(id int32) { 5667 word := id / 32 5668 mask := uint32(1) << (id % 32) 5669 atomic.Or(&p[word], mask) 5670 } 5671 5672 // clear clears P id's bit. 5673 func (p pMask) clear(id int32) { 5674 word := id / 32 5675 mask := uint32(1) << (id % 32) 5676 atomic.And(&p[word], ^mask) 5677 } 5678 5679 // updateTimerPMask clears pp's timer mask if it has no timers on its heap. 5680 // 5681 // Ideally, the timer mask would be kept immediately consistent on any timer 5682 // operations. Unfortunately, updating a shared global data structure in the 5683 // timer hot path adds too much overhead in applications frequently switching 5684 // between no timers and some timers. 5685 // 5686 // As a compromise, the timer mask is updated only on pidleget / pidleput. A 5687 // running P (returned by pidleget) may add a timer at any time, so its mask 5688 // must be set. An idle P (passed to pidleput) cannot add new timers while 5689 // idle, so if it has no timers at that time, its mask may be cleared. 5690 // 5691 // Thus, we get the following effects on timer-stealing in findrunnable: 5692 // 5693 // * Idle Ps with no timers when they go idle are never checked in findrunnable 5694 // (for work- or timer-stealing; this is the ideal case). 5695 // * Running Ps must always be checked. 
5696 // * Idle Ps whose timers are stolen must continue to be checked until they run
5697 // again, even after timer expiration.
5698 //
5699 // When the P starts running again, the mask should be set, as a timer may be
5700 // added at any time.
5701 //
5702 // TODO(prattmic): Additional targeted updates may improve the above cases.
5703 // e.g., updating the mask when stealing a timer.
5704 func updateTimerPMask(pp *p) {
5705 if atomic.Load(&pp.numTimers) > 0 {
5706 return
5707 }
5708
5709 // Looks like there are no timers, however another P may transiently
5710 // decrement numTimers when handling a timerModified timer in
5711 // checkTimers. We must take timersLock to serialize with these changes.
5712 lock(&pp.timersLock)
5713 if atomic.Load(&pp.numTimers) == 0 {
5714 timerpMask.clear(pp.id)
5715 }
5716 unlock(&pp.timersLock)
5717 }
5718
5719 // pidleput puts p on the _Pidle list.
5720 //
5721 // This releases ownership of p. Once sched.lock is released it is no longer
5722 // safe to use p.
5723 //
5724 // sched.lock must be held.
5725 //
5726 // May run during STW, so write barriers are not allowed.
5727 //go:nowritebarrierrec
5728 func pidleput(_p_ *p) {
5729 assertLockHeld(&sched.lock)
5730
5731 if !runqempty(_p_) {
5732 throw("pidleput: P has non-empty run queue")
5733 }
5734 updateTimerPMask(_p_) // clear if there are no timers.
5735 idlepMask.set(_p_.id)
5736 _p_.link = sched.pidle
5737 sched.pidle.set(_p_)
5738 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
5739 }
5740
5741 // pidleget tries to get a p from the _Pidle list, acquiring ownership.
5742 //
5743 // sched.lock must be held.
5744 //
5745 // May run during STW, so write barriers are not allowed.
5746 //go:nowritebarrierrec
5747 func pidleget() *p {
5748 assertLockHeld(&sched.lock)
5749
5750 _p_ := sched.pidle.ptr()
5751 if _p_ != nil {
5752 // Timer may get added at any time now.
5753 timerpMask.set(_p_.id)
5754 idlepMask.clear(_p_.id)
5755 sched.pidle = _p_.link
5756 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
5757 }
5758 return _p_
5759 }
5760
5761 // runqempty reports whether _p_ has no Gs on its local run queue.
5762 // It never returns true spuriously.
5763 func runqempty(_p_ *p) bool {
5764 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
5765 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
5766 // Simply observing that runqhead == runqtail and then observing that runqnext == nil
5767 // does not mean the queue is empty.
5768 for {
5769 head := atomic.Load(&_p_.runqhead)
5770 tail := atomic.Load(&_p_.runqtail)
5771 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
5772 if tail == atomic.Load(&_p_.runqtail) {
5773 return head == tail && runnext == 0
5774 }
5775 }
5776 }
5777
5778 // To shake out latent assumptions about scheduling order,
5779 // we introduce some randomness into scheduling decisions
5780 // when running with the race detector.
5781 // The need for this was made obvious by changing the
5782 // (deterministic) scheduling order in Go 1.5 and breaking
5783 // many poorly-written tests.
5784 // With the randomness here, as long as the tests pass
5785 // consistently with -race, they shouldn't have latent scheduling
5786 // assumptions.
5787 const randomizeScheduler = raceenabled
5788
5789 // runqput tries to put g on the local runnable queue.
5790 // If next is false, runqput adds g to the tail of the runnable queue.
5791 // If next is true, runqput puts g in the _p_.runnext slot.
5792 // If the run queue is full, runqput puts g on the global queue.
5793 // Executed only by the owner P.
5794 func runqput(_p_ *p, gp *g, next bool) {
5795 if randomizeScheduler && next && fastrand()%2 == 0 {
5796 next = false
5797 }
5798
5799 if next {
5800 retryNext:
5801 oldnext := _p_.runnext
5802 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
5803 goto retryNext
5804 }
5805 if oldnext == 0 {
5806 return
5807 }
5808 // Kick the old runnext out to the regular run queue.
5809 gp = oldnext.ptr()
5810 }
5811
5812 retry:
5813 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers
5814 t := _p_.runqtail
5815 if t-h < uint32(len(_p_.runq)) {
5816 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
5817 atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
5818 return
5819 }
5820 if runqputslow(_p_, gp, h, t) {
5821 return
5822 }
5823 // the queue is not full, now the put above must succeed
5824 goto retry
5825 }
5826
5827 // Put g and a batch of work from local runnable queue on global queue.
5828 // Executed only by the owner P.
5829 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
5830 var batch [len(_p_.runq)/2 + 1]*g
5831
5832 // First, grab a batch from local queue.
5833 n := t - h
5834 n = n / 2
5835 if n != uint32(len(_p_.runq)/2) {
5836 throw("runqputslow: queue is not full")
5837 }
5838 for i := uint32(0); i < n; i++ {
5839 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
5840 }
5841 if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume
5842 return false
5843 }
5844 batch[n] = gp
5845
5846 if randomizeScheduler {
5847 for i := uint32(1); i <= n; i++ {
5848 j := fastrandn(i + 1)
5849 batch[i], batch[j] = batch[j], batch[i]
5850 }
5851 }
5852
5853 // Link the goroutines.
5854 for i := uint32(0); i < n; i++ {
5855 batch[i].schedlink.set(batch[i+1])
5856 }
5857 var q gQueue
5858 q.head.set(batch[0])
5859 q.tail.set(batch[n])
5860
5861 // Now put the batch on global queue.
5862 lock(&sched.lock)
5863 globrunqputbatch(&q, int32(n+1))
5864 unlock(&sched.lock)
5865 return true
5866 }
5867
5868 // runqputbatch tries to put all the G's on q on the local runnable queue.
5869 // If the queue is full, they are put on the global queue; in that case
5870 // this will temporarily acquire the scheduler lock.
5871 // Executed only by the owner P.
5872 func runqputbatch(pp *p, q *gQueue, qsize int) {
5873 h := atomic.LoadAcq(&pp.runqhead)
5874 t := pp.runqtail
5875 n := uint32(0)
5876 for !q.empty() && t-h < uint32(len(pp.runq)) {
5877 gp := q.pop()
5878 pp.runq[t%uint32(len(pp.runq))].set(gp)
5879 t++
5880 n++
5881 }
5882 qsize -= int(n)
5883
5884 if randomizeScheduler {
5885 off := func(o uint32) uint32 {
5886 return (pp.runqtail + o) % uint32(len(pp.runq))
5887 }
5888 for i := uint32(1); i < n; i++ {
5889 j := fastrandn(i + 1)
5890 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
5891 }
5892 }
5893
5894 atomic.StoreRel(&pp.runqtail, t)
5895 if !q.empty() {
5896 lock(&sched.lock)
5897 globrunqputbatch(q, int32(qsize))
5898 unlock(&sched.lock)
5899 }
5900 }
5901
5902 // Get g from local runnable queue.
5903 // If inheritTime is true, gp should inherit the remaining time in the
5904 // current time slice. Otherwise, it should start a new time slice.
5905 // Executed only by the owner P.
5906 func runqget(_p_ *p) (gp *g, inheritTime bool) {
5907 // If there's a runnext, it's the next G to run.
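// The claim below must CAS runnext to zero rather than simply read it,
// because another P may concurrently steal runnext via runqgrab; only the
// CAS winner owns the G.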
5908 for { 5909 next := _p_.runnext 5910 if next == 0 { 5911 break 5912 } 5913 if _p_.runnext.cas(next, 0) { 5914 return next.ptr(), true 5915 } 5916 } 5917 5918 for { 5919 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 5920 t := _p_.runqtail 5921 if t == h { 5922 return nil, false 5923 } 5924 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 5925 if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume 5926 return gp, false 5927 } 5928 } 5929 } 5930 5931 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 5932 // Batch is a ring buffer starting at batchHead. 5933 // Returns number of grabbed goroutines. 5934 // Can be executed by any P. 5935 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 5936 for { 5937 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 5938 t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer 5939 n := t - h 5940 n = n - n/2 5941 if n == 0 { 5942 if stealRunNextG { 5943 // Try to steal from _p_.runnext. 5944 if next := _p_.runnext; next != 0 { 5945 if _p_.status == _Prunning { 5946 // Sleep to ensure that _p_ isn't about to run the g 5947 // we are about to steal. 5948 // The important use case here is when the g running 5949 // on _p_ ready()s another g and then almost 5950 // immediately blocks. Instead of stealing runnext 5951 // in this window, back off to give _p_ a chance to 5952 // schedule runnext. This will avoid thrashing gs 5953 // between different Ps. 5954 // A sync chan send/recv takes ~50ns as of time of 5955 // writing, so 3us gives ~50x overshoot. 5956 if GOOS != "windows" { 5957 usleep(3) 5958 } else { 5959 // On windows system timer granularity is 5960 // 1-15ms, which is way too much for this 5961 // optimization. So just yield. 5962 osyield() 5963 } 5964 } 5965 if !_p_.runnext.cas(next, 0) { 5966 continue 5967 } 5968 batch[batchHead%uint32(len(batch))] = next 5969 return 1 5970 } 5971 } 5972 return 0 5973 } 5974 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 5975 continue 5976 } 5977 for i := uint32(0); i < n; i++ { 5978 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 5979 batch[(batchHead+i)%uint32(len(batch))] = g 5980 } 5981 if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 5982 return n 5983 } 5984 } 5985 } 5986 5987 // Steal half of elements from local runnable queue of p2 5988 // and put onto local runnable queue of p. 5989 // Returns one of the stolen elements (or nil if failed). 5990 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 5991 t := _p_.runqtail 5992 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 5993 if n == 0 { 5994 return nil 5995 } 5996 n-- 5997 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 5998 if n == 0 { 5999 return gp 6000 } 6001 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 6002 if t-h+n >= uint32(len(_p_.runq)) { 6003 throw("runqsteal: runq overflow") 6004 } 6005 atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 6006 return gp 6007 } 6008 6009 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only 6010 // be on one gQueue or gList at a time. 6011 type gQueue struct { 6012 head guintptr 6013 tail guintptr 6014 } 6015 6016 // empty reports whether q is empty. 6017 func (q *gQueue) empty() bool { 6018 return q.head == 0 6019 } 6020 6021 // push adds gp to the head of q. 
6022 func (q *gQueue) push(gp *g) {
6023 gp.schedlink = q.head
6024 q.head.set(gp)
6025 if q.tail == 0 {
6026 q.tail.set(gp)
6027 }
6028 }
6029
6030 // pushBack adds gp to the tail of q.
6031 func (q *gQueue) pushBack(gp *g) {
6032 gp.schedlink = 0
6033 if q.tail != 0 {
6034 q.tail.ptr().schedlink.set(gp)
6035 } else {
6036 q.head.set(gp)
6037 }
6038 q.tail.set(gp)
6039 }
6040
6041 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
6042 // not be used.
6043 func (q *gQueue) pushBackAll(q2 gQueue) {
6044 if q2.tail == 0 {
6045 return
6046 }
6047 q2.tail.ptr().schedlink = 0
6048 if q.tail != 0 {
6049 q.tail.ptr().schedlink = q2.head
6050 } else {
6051 q.head = q2.head
6052 }
6053 q.tail = q2.tail
6054 }
6055
6056 // pop removes and returns the head of queue q. It returns nil if
6057 // q is empty.
6058 func (q *gQueue) pop() *g {
6059 gp := q.head.ptr()
6060 if gp != nil {
6061 q.head = gp.schedlink
6062 if q.head == 0 {
6063 q.tail = 0
6064 }
6065 }
6066 return gp
6067 }
6068
6069 // popList takes all Gs in q and returns them as a gList.
6070 func (q *gQueue) popList() gList {
6071 stack := gList{q.head}
6072 *q = gQueue{}
6073 return stack
6074 }
6075
6076 // A gList is a list of Gs linked through g.schedlink. A G can only be
6077 // on one gQueue or gList at a time.
6078 type gList struct {
6079 head guintptr
6080 }
6081
6082 // empty reports whether l is empty.
6083 func (l *gList) empty() bool {
6084 return l.head == 0
6085 }
6086
6087 // push adds gp to the head of l.
6088 func (l *gList) push(gp *g) {
6089 gp.schedlink = l.head
6090 l.head.set(gp)
6091 }
6092
6093 // pushAll prepends all Gs in q to l.
6094 func (l *gList) pushAll(q gQueue) {
6095 if !q.empty() {
6096 q.tail.ptr().schedlink = l.head
6097 l.head = q.head
6098 }
6099 }
6100
6101 // pop removes and returns the head of l. If l is empty, it returns nil.
6102 func (l *gList) pop() *g {
6103 gp := l.head.ptr()
6104 if gp != nil {
6105 l.head = gp.schedlink
6106 }
6107 return gp
6108 }
6109
6110 //go:linkname setMaxThreads runtime/debug.setMaxThreads
6111 func setMaxThreads(in int) (out int) {
6112 lock(&sched.lock)
6113 out = int(sched.maxmcount)
6114 if in > 0x7fffffff { // MaxInt32
6115 sched.maxmcount = 0x7fffffff
6116 } else {
6117 sched.maxmcount = int32(in)
6118 }
6119 checkmcount()
6120 unlock(&sched.lock)
6121 return
6122 }
6123
6124 func haveexperiment(name string) bool {
6125 x := sys.Goexperiment
6126 for x != "" {
6127 xname := ""
6128 i := bytealg.IndexByteString(x, ',')
6129 if i < 0 {
6130 xname, x = x, ""
6131 } else {
6132 xname, x = x[:i], x[i+1:]
6133 }
6134 if xname == name {
6135 return true
6136 }
6137 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
6138 return false
6139 }
6140 }
6141 return false
6142 }
6143
6144 //go:nosplit
6145 func procPin() int {
6146 _g_ := getg()
6147 mp := _g_.m
6148
6149 mp.locks++
6150 return int(mp.p.ptr().id)
6151 }
6152
6153 //go:nosplit
6154 func procUnpin() {
6155 _g_ := getg()
6156 _g_.m.locks--
6157 }
6158
6159 //go:linkname sync_runtime_procPin sync.runtime_procPin
6160 //go:nosplit
6161 func sync_runtime_procPin() int {
6162 return procPin()
6163 }
6164
6165 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
6166 //go:nosplit
6167 func sync_runtime_procUnpin() {
6168 procUnpin()
6169 }
6170
6171 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
6172 //go:nosplit
6173 func sync_atomic_runtime_procPin() int {
6174 return procPin()
6175 }
6176
6177 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
6178 //go:nosplit
6179 func sync_atomic_runtime_procUnpin() {
6180 procUnpin()
6181 }
6182
6183 // Active spinning for sync.Mutex.
6184 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
6185 //go:nosplit
6186 func sync_runtime_canSpin(i int) bool {
6187 // sync.Mutex is cooperative, so we are conservative with spinning.
6188 // Spin only a few times and only if running on a multicore machine and
6189 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
6190 // As opposed to runtime mutex we don't do passive spinning here,
6191 // because there can be work on global runq or on other Ps.
6192 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
6193 return false
6194 }
6195 if p := getg().m.p.ptr(); !runqempty(p) {
6196 return false
6197 }
6198 return true
6199 }
6200
6201 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
6202 //go:nosplit
6203 func sync_runtime_doSpin() {
6204 procyield(active_spin_cnt)
6205 }
6206
6207 var stealOrder randomOrder
6208
6209 // randomOrder/randomEnum are helper types for randomized work stealing.
6210 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
6211 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
6212 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
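// For example, with GOMAXPROCS = 6 the coprime increments are {1, 5}; seeding
// the enumeration at pos = 2 with inc = 5 visits Ps 2, 1, 0, 5, 4, 3, each
// exactly once, because stepping by a value coprime to the count cycles
// through every residue before repeating.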
6213 type randomOrder struct { 6214 count uint32 6215 coprimes []uint32 6216 } 6217 6218 type randomEnum struct { 6219 i uint32 6220 count uint32 6221 pos uint32 6222 inc uint32 6223 } 6224 6225 func (ord *randomOrder) reset(count uint32) { 6226 ord.count = count 6227 ord.coprimes = ord.coprimes[:0] 6228 for i := uint32(1); i <= count; i++ { 6229 if gcd(i, count) == 1 { 6230 ord.coprimes = append(ord.coprimes, i) 6231 } 6232 } 6233 } 6234 6235 func (ord *randomOrder) start(i uint32) randomEnum { 6236 return randomEnum{ 6237 count: ord.count, 6238 pos: i % ord.count, 6239 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 6240 } 6241 } 6242 6243 func (enum *randomEnum) done() bool { 6244 return enum.i == enum.count 6245 } 6246 6247 func (enum *randomEnum) next() { 6248 enum.i++ 6249 enum.pos = (enum.pos + enum.inc) % enum.count 6250 } 6251 6252 func (enum *randomEnum) position() uint32 { 6253 return enum.pos 6254 } 6255 6256 func gcd(a, b uint32) uint32 { 6257 for b != 0 { 6258 a, b = b, a%b 6259 } 6260 return a 6261 } 6262 6263 // An initTask represents the set of initializations that need to be done for a package. 6264 // Keep in sync with ../../test/initempty.go:initTask 6265 type initTask struct { 6266 // TODO: pack the first 3 fields more tightly? 6267 state uintptr // 0 = uninitialized, 1 = in progress, 2 = done 6268 ndeps uintptr 6269 nfns uintptr 6270 // followed by ndeps instances of an *initTask, one per package depended on 6271 // followed by nfns pcs, one per init function to run 6272 } 6273 6274 // inittrace stores statistics for init functions which are 6275 // updated by malloc and newproc when active is true. 6276 var inittrace tracestat 6277 6278 type tracestat struct { 6279 active bool // init tracing activation status 6280 id int64 // init go routine id 6281 allocs uint64 // heap allocations 6282 bytes uint64 // heap allocated bytes 6283 } 6284 6285 func doInit(t *initTask) { 6286 switch t.state { 6287 case 2: // fully initialized 6288 return 6289 case 1: // initialization in progress 6290 throw("recursive call during initialization - linker skew") 6291 default: // not initialized yet 6292 t.state = 1 // initialization in progress 6293 6294 for i := uintptr(0); i < t.ndeps; i++ { 6295 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize) 6296 t2 := *(**initTask)(p) 6297 doInit(t2) 6298 } 6299 6300 if t.nfns == 0 { 6301 t.state = 2 // initialization done 6302 return 6303 } 6304 6305 var ( 6306 start int64 6307 before tracestat 6308 ) 6309 6310 if inittrace.active { 6311 start = nanotime() 6312 // Load stats non-atomically since tracinit is updated only by this init go routine. 6313 before = inittrace 6314 } 6315 6316 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*sys.PtrSize) 6317 for i := uintptr(0); i < t.nfns; i++ { 6318 p := add(firstFunc, i*sys.PtrSize) 6319 f := *(*func())(unsafe.Pointer(&p)) 6320 f() 6321 } 6322 6323 if inittrace.active { 6324 end := nanotime() 6325 // Load stats non-atomically since tracinit is updated only by this init go routine. 6326 after := inittrace 6327 6328 pkg := funcpkgpath(findfunc(funcPC(firstFunc))) 6329 6330 var sbuf [24]byte 6331 print("init ", pkg, " @") 6332 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ") 6333 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ") 6334 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ") 6335 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs") 6336 print("\n") 6337 } 6338 6339 t.state = 2 // initialization done 6340 } 6341 }
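// A minimal, standalone sketch (not part of this file) of the pMask indexing
// used by idlepMask and timerpMask above: one bit per P packed into uint32
// words, addressed as word id/32 and bit 1<<(id%32). Standard sync/atomic
// CAS loops stand in for the runtime's internal atomic.Or/And, and the
// GOMAXPROCS value in main is an arbitrary choice for illustration.

package main

import (
	"fmt"
	"sync/atomic"
)

type pMask []uint32

// read reports whether P id's bit is set.
func (p pMask) read(id uint32) bool {
	word := id / 32
	mask := uint32(1) << (id % 32)
	return atomic.LoadUint32(&p[word])&mask != 0
}

// set sets P id's bit atomically.
func (p pMask) set(id uint32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	for {
		old := atomic.LoadUint32(&p[word])
		if atomic.CompareAndSwapUint32(&p[word], old, old|mask) {
			return
		}
	}
}

// clear clears P id's bit atomically.
func (p pMask) clear(id uint32) {
	word := id / 32
	mask := uint32(1) << (id % 32)
	for {
		old := atomic.LoadUint32(&p[word])
		if atomic.CompareAndSwapUint32(&p[word], old, old&^mask) {
			return
		}
	}
}

func main() {
	const nprocs = 40 // hypothetical GOMAXPROCS; needs two uint32 words
	mask := make(pMask, (nprocs+31)/32)
	mask.set(3)
	mask.set(37)
	fmt.Println(mask.read(3), mask.read(4), mask.read(37)) // true false true
	mask.clear(37)
	fmt.Println(mask.read(37)) // false
}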