github.com/AESNooper/go/src@v0.0.0-20220218095104-b56a4ab1bbbb/runtime/proc.go 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/abi" 9 "internal/cpu" 10 "internal/goarch" 11 "runtime/internal/atomic" 12 "runtime/internal/sys" 13 "unsafe" 14 ) 15 16 // set using cmd/go/internal/modload.ModInfoProg 17 var modinfo string 18 19 // Goroutine scheduler 20 // The scheduler's job is to distribute ready-to-run goroutines over worker threads. 21 // 22 // The main concepts are: 23 // G - goroutine. 24 // M - worker thread, or machine. 25 // P - processor, a resource that is required to execute Go code. 26 // M must have an associated P to execute Go code; however, it can be 27 // blocked or in a syscall without an associated P. 28 // 29 // Design doc at https://golang.org/s/go11sched. 30 31 // Worker thread parking/unparking. 32 // We need to balance between keeping enough running worker threads to utilize 33 // available hardware parallelism and parking excessive running worker threads 34 // to conserve CPU resources and power. This is not simple for two reasons: 35 // (1) scheduler state is intentionally distributed (in particular, per-P work 36 // queues), so it is not possible to compute global predicates on fast paths; 37 // (2) for optimal thread management we would need to know the future (don't park 38 // a worker thread when a new goroutine will be readied in the near future). 39 // 40 // Three rejected approaches that would work badly: 41 // 1. Centralize all scheduler state (would inhibit scalability). 42 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there 43 // is a spare P, unpark a thread and hand it the P and the goroutine. 44 // This would lead to thread state thrashing, as the thread that readied the 45 // goroutine can be out of work the very next moment, and we would then need to park it. 46 // Also, it would destroy locality of computation, as we want to preserve 47 // dependent goroutines on the same thread, and it would introduce additional latency. 48 // 3. Unpark an additional thread whenever we ready a goroutine and there is an 49 // idle P, but don't do handoff. This would lead to excessive thread parking/ 50 // unparking as the additional threads will instantly park without discovering 51 // any work to do. 52 // 53 // The current approach: 54 // 55 // This approach applies to three primary sources of potential work: readying a 56 // goroutine, new/modified-earlier timers, and idle-priority GC. See below for 57 // additional details. 58 // 59 // We unpark an additional thread when we submit work if (this is wakep()): 60 // 1. There is an idle P, and 61 // 2. There are no "spinning" worker threads. 62 // 63 // A worker thread is considered spinning if it is out of local work and did 64 // not find work in the global run queue or netpoller; the spinning state is 65 // denoted in m.spinning and in sched.nmspinning. Threads unparked this way are 66 // also considered spinning; we don't do goroutine handoff so such threads are 67 // out of work initially. Spinning threads repeatedly look for work in per-P 68 // run queues and timer heaps, or from the GC, before parking. If a spinning 69 // thread finds work it takes itself out of the spinning state and proceeds to 70 // execution. If it does not find work it takes itself out of the spinning 71 // state and then parks.
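// As an illustration only, here is a minimal sketch of the unpark rule just
// described; it is not the actual runtime code, and idlePCount, spinningCount
// and startSpinningM are hypothetical helpers used only for this sketch:
//
//	func maybeWakeWorker() {
//		// Wake one extra worker only if there is an idle P for it to use
//		// and no worker is already spinning; otherwise rely on the last
//		// spinning thread to wake a successor (see the compensation rule
//		// described below).
//		if idlePCount() > 0 && spinningCount() == 0 {
//			startSpinningM() // the new worker starts out in the spinning state
//		}
//	}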
72 // 73 // If there is at least one spinning thread (sched.nmspinning>1), we don't 74 // unpark new threads when submitting work. To compensate for that, if the last 75 // spinning thread finds work and stops spinning, it must unpark a new spinning 76 // thread. This approach smooths out unjustified spikes of thread unparking, 77 // but at the same time guarantees eventual maximal CPU parallelism 78 // utilization. 79 // 80 // The main implementation complication is that we need to be very careful 81 // during spinning->non-spinning thread transition. This transition can race 82 // with submission of new work, and either one part or another needs to unpark 83 // another worker thread. If they both fail to do that, we can end up with 84 // semi-persistent CPU underutilization. 85 // 86 // The general pattern for submission is: 87 // 1. Submit work to the local run queue, timer heap, or GC state. 88 // 2. #StoreLoad-style memory barrier. 89 // 3. Check sched.nmspinning. 90 // 91 // The general pattern for spinning->non-spinning transition is: 92 // 1. Decrement nmspinning. 93 // 2. #StoreLoad-style memory barrier. 94 // 3. Check all per-P work queues and GC for new work. 95 // 96 // Note that all this complexity does not apply to global run queue as we are 97 // not sloppy about thread unparking when submitting to global queue. Also see 98 // comments for nmspinning manipulation. 99 // 100 // How these different sources of work behave varies, though it doesn't affect 101 // the synchronization approach: 102 // * Ready goroutine: this is an obvious source of work; the goroutine is 103 // immediately ready and must run on some thread eventually. 104 // * New/modified-earlier timer: The current timer implementation (see time.go) 105 // uses netpoll in a thread with no work available to wait for the soonest 106 // timer. If there is no thread waiting, we want a new spinning thread to go 107 // wait. 108 // * Idle-priority GC: The GC wakes a stopped idle thread to contribute to 109 // background GC work (note: currently disabled per golang.org/issue/19112). 110 // Also see golang.org/issue/44313, as this should be extended to all GC 111 // workers. 112 113 var ( 114 m0 m 115 g0 g 116 mcache0 *mcache 117 raceprocctx0 uintptr 118 ) 119 120 //go:linkname runtime_inittask runtime..inittask 121 var runtime_inittask initTask 122 123 //go:linkname main_inittask main..inittask 124 var main_inittask initTask 125 126 // main_init_done is a signal used by cgocallbackg that initialization 127 // has been completed. It is made before _cgo_notify_runtime_init_done, 128 // so all cgo calls can rely on it existing. When main_init is complete, 129 // it is closed, meaning cgocallbackg can reliably receive from it. 130 var main_init_done chan bool 131 132 //go:linkname main_main main.main 133 func main_main() 134 135 // mainStarted indicates that the main M has started. 136 var mainStarted bool 137 138 // runtimeInitTime is the nanotime() at which the runtime started. 139 var runtimeInitTime int64 140 141 // Value to use for signal mask for newly created M's. 142 var initSigmask sigset 143 144 // The main goroutine. 145 func main() { 146 g := getg() 147 148 // Racectx of m0->g0 is used only as the parent of the main goroutine. 149 // It must not be used for anything else. 150 g.m.g0.racectx = 0 151 152 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 153 // Using decimal instead of binary GB and MB because 154 // they look nicer in the stack overflow failure message. 
155 if goarch.PtrSize == 8 { 156 maxstacksize = 1000000000 157 } else { 158 maxstacksize = 250000000 159 } 160 161 // An upper limit for max stack size. Used to avoid random crashes 162 // after calling SetMaxStack and trying to allocate a stack that is too big, 163 // since stackalloc works with 32-bit sizes. 164 maxstackceiling = 2 * maxstacksize 165 166 // Allow newproc to start new Ms. 167 mainStarted = true 168 169 if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon 170 // For runtime_syscall_doAllThreadsSyscall, we 171 // register sysmon is not ready for the world to be 172 // stopped. 173 atomic.Store(&sched.sysmonStarting, 1) 174 systemstack(func() { 175 newm(sysmon, nil, -1) 176 }) 177 } 178 179 // Lock the main goroutine onto this, the main OS thread, 180 // during initialization. Most programs won't care, but a few 181 // do require certain calls to be made by the main thread. 182 // Those can arrange for main.main to run in the main thread 183 // by calling runtime.LockOSThread during initialization 184 // to preserve the lock. 185 lockOSThread() 186 187 if g.m != &m0 { 188 throw("runtime.main not on m0") 189 } 190 m0.doesPark = true 191 192 // Record when the world started. 193 // Must be before doInit for tracing init. 194 runtimeInitTime = nanotime() 195 if runtimeInitTime == 0 { 196 throw("nanotime returning zero") 197 } 198 199 if debug.inittrace != 0 { 200 inittrace.id = getg().goid 201 inittrace.active = true 202 } 203 204 doInit(&runtime_inittask) // Must be before defer. 205 206 // Defer unlock so that runtime.Goexit during init does the unlock too. 207 needUnlock := true 208 defer func() { 209 if needUnlock { 210 unlockOSThread() 211 } 212 }() 213 214 gcenable() 215 216 main_init_done = make(chan bool) 217 if iscgo { 218 if _cgo_thread_start == nil { 219 throw("_cgo_thread_start missing") 220 } 221 if GOOS != "windows" { 222 if _cgo_setenv == nil { 223 throw("_cgo_setenv missing") 224 } 225 if _cgo_unsetenv == nil { 226 throw("_cgo_unsetenv missing") 227 } 228 } 229 if _cgo_notify_runtime_init_done == nil { 230 throw("_cgo_notify_runtime_init_done missing") 231 } 232 // Start the template thread in case we enter Go from 233 // a C-created thread and need to create a new thread. 234 startTemplateThread() 235 cgocall(_cgo_notify_runtime_init_done, nil) 236 } 237 238 doInit(&main_inittask) 239 240 // Disable init tracing after main init done to avoid overhead 241 // of collecting statistics in malloc and newproc 242 inittrace.active = false 243 244 close(main_init_done) 245 246 needUnlock = false 247 unlockOSThread() 248 249 if isarchive || islibrary { 250 // A program compiled with -buildmode=c-archive or c-shared 251 // has a main, but it is not executed. 252 return 253 } 254 fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 255 fn() 256 if raceenabled { 257 racefini() 258 } 259 260 // Make racy client program work: if panicking on 261 // another goroutine at the same time as main returns, 262 // let the other goroutine finish printing the panic trace. 263 // Once it does, it will exit. See issues 3934 and 20018. 264 if atomic.Load(&runningPanicDefers) != 0 { 265 // Running deferred functions should not take long. 
266 for c := 0; c < 1000; c++ { 267 if atomic.Load(&runningPanicDefers) == 0 { 268 break 269 } 270 Gosched() 271 } 272 } 273 if atomic.Load(&panicking) != 0 { 274 gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1) 275 } 276 277 exit(0) 278 for { 279 var x *int32 280 *x = 0 281 } 282 } 283 284 // os_beforeExit is called from os.Exit(0). 285 //go:linkname os_beforeExit os.runtime_beforeExit 286 func os_beforeExit() { 287 if raceenabled { 288 racefini() 289 } 290 } 291 292 // start forcegc helper goroutine 293 func init() { 294 go forcegchelper() 295 } 296 297 func forcegchelper() { 298 forcegc.g = getg() 299 lockInit(&forcegc.lock, lockRankForcegc) 300 for { 301 lock(&forcegc.lock) 302 if forcegc.idle != 0 { 303 throw("forcegc: phase error") 304 } 305 atomic.Store(&forcegc.idle, 1) 306 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1) 307 // this goroutine is explicitly resumed by sysmon 308 if debug.gctrace > 0 { 309 println("GC forced") 310 } 311 // Time-triggered, fully concurrent. 312 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()}) 313 } 314 } 315 316 //go:nosplit 317 318 // Gosched yields the processor, allowing other goroutines to run. It does not 319 // suspend the current goroutine, so execution resumes automatically. 320 func Gosched() { 321 checkTimeouts() 322 mcall(gosched_m) 323 } 324 325 // goschedguarded yields the processor like gosched, but also checks 326 // for forbidden states and opts out of the yield in those cases. 327 //go:nosplit 328 func goschedguarded() { 329 mcall(goschedguarded_m) 330 } 331 332 // Puts the current goroutine into a waiting state and calls unlockf on the 333 // system stack. 334 // 335 // If unlockf returns false, the goroutine is resumed. 336 // 337 // unlockf must not access this G's stack, as it may be moved between 338 // the call to gopark and the call to unlockf. 339 // 340 // Note that because unlockf is called after putting the G into a waiting 341 // state, the G may have already been readied by the time unlockf is called 342 // unless there is external synchronization preventing the G from being 343 // readied. If unlockf returns false, it must guarantee that the G cannot be 344 // externally readied. 345 // 346 // Reason explains why the goroutine has been parked. It is displayed in stack 347 // traces and heap dumps. Reasons should be unique and descriptive. Do not 348 // re-use reasons, add new ones. 349 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) { 350 if reason != waitReasonSleep { 351 checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy 352 } 353 mp := acquirem() 354 gp := mp.curg 355 status := readgstatus(gp) 356 if status != _Grunning && status != _Gscanrunning { 357 throw("gopark: bad g status") 358 } 359 mp.waitlock = lock 360 mp.waitunlockf = unlockf 361 gp.waitreason = reason 362 mp.waittraceev = traceEv 363 mp.waittraceskip = traceskip 364 releasem(mp) 365 // can't do anything that might move the G between Ms here. 366 mcall(park_m) 367 } 368 369 // Puts the current goroutine into a waiting state and unlocks the lock. 370 // The goroutine can be made runnable again by calling goready(gp). 
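// As an illustrative sketch only (mu, ready, gp and waitReasonExample are
// hypothetical placeholders, not runtime identifiers), the usual pattern is
// to park under a lock and re-check the condition after being readied:
//
//	lock(&mu)
//	for !ready {
//		goparkunlock(&mu, waitReasonExample, traceEvGoBlock, 1)
//		lock(&mu) // goparkunlock does not reacquire the lock on wakeup
//	}
//	unlock(&mu)
//
// The waking side sets the condition while holding mu and then calls
// goready(gp, 1) on the parked goroutine, much like forcegchelper above,
// which parks this way and is later resumed by sysmon.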
371 func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) { 372 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 373 } 374 375 func goready(gp *g, traceskip int) { 376 systemstack(func() { 377 ready(gp, traceskip, true) 378 }) 379 } 380 381 //go:nosplit 382 func acquireSudog() *sudog { 383 // Delicate dance: the semaphore implementation calls 384 // acquireSudog, acquireSudog calls new(sudog), 385 // new calls malloc, malloc can call the garbage collector, 386 // and the garbage collector calls the semaphore implementation 387 // in stopTheWorld. 388 // Break the cycle by doing acquirem/releasem around new(sudog). 389 // The acquirem/releasem increments m.locks during new(sudog), 390 // which keeps the garbage collector from being invoked. 391 mp := acquirem() 392 pp := mp.p.ptr() 393 if len(pp.sudogcache) == 0 { 394 lock(&sched.sudoglock) 395 // First, try to grab a batch from central cache. 396 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 397 s := sched.sudogcache 398 sched.sudogcache = s.next 399 s.next = nil 400 pp.sudogcache = append(pp.sudogcache, s) 401 } 402 unlock(&sched.sudoglock) 403 // If the central cache is empty, allocate a new one. 404 if len(pp.sudogcache) == 0 { 405 pp.sudogcache = append(pp.sudogcache, new(sudog)) 406 } 407 } 408 n := len(pp.sudogcache) 409 s := pp.sudogcache[n-1] 410 pp.sudogcache[n-1] = nil 411 pp.sudogcache = pp.sudogcache[:n-1] 412 if s.elem != nil { 413 throw("acquireSudog: found s.elem != nil in cache") 414 } 415 releasem(mp) 416 return s 417 } 418 419 //go:nosplit 420 func releaseSudog(s *sudog) { 421 if s.elem != nil { 422 throw("runtime: sudog with non-nil elem") 423 } 424 if s.isSelect { 425 throw("runtime: sudog with non-false isSelect") 426 } 427 if s.next != nil { 428 throw("runtime: sudog with non-nil next") 429 } 430 if s.prev != nil { 431 throw("runtime: sudog with non-nil prev") 432 } 433 if s.waitlink != nil { 434 throw("runtime: sudog with non-nil waitlink") 435 } 436 if s.c != nil { 437 throw("runtime: sudog with non-nil c") 438 } 439 gp := getg() 440 if gp.param != nil { 441 throw("runtime: releaseSudog with non-nil gp.param") 442 } 443 mp := acquirem() // avoid rescheduling to another P 444 pp := mp.p.ptr() 445 if len(pp.sudogcache) == cap(pp.sudogcache) { 446 // Transfer half of local cache to the central cache. 
447 var first, last *sudog 448 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 449 n := len(pp.sudogcache) 450 p := pp.sudogcache[n-1] 451 pp.sudogcache[n-1] = nil 452 pp.sudogcache = pp.sudogcache[:n-1] 453 if first == nil { 454 first = p 455 } else { 456 last.next = p 457 } 458 last = p 459 } 460 lock(&sched.sudoglock) 461 last.next = sched.sudogcache 462 sched.sudogcache = first 463 unlock(&sched.sudoglock) 464 } 465 pp.sudogcache = append(pp.sudogcache, s) 466 releasem(mp) 467 } 468 469 // called from assembly 470 func badmcall(fn func(*g)) { 471 throw("runtime: mcall called on m->g0 stack") 472 } 473 474 func badmcall2(fn func(*g)) { 475 throw("runtime: mcall function returned") 476 } 477 478 func badreflectcall() { 479 panic(plainError("arg size to reflect.call more than 1GB")) 480 } 481 482 var badmorestackg0Msg = "fatal: morestack on g0\n" 483 484 //go:nosplit 485 //go:nowritebarrierrec 486 func badmorestackg0() { 487 sp := stringStructOf(&badmorestackg0Msg) 488 write(2, sp.str, int32(sp.len)) 489 } 490 491 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n" 492 493 //go:nosplit 494 //go:nowritebarrierrec 495 func badmorestackgsignal() { 496 sp := stringStructOf(&badmorestackgsignalMsg) 497 write(2, sp.str, int32(sp.len)) 498 } 499 500 //go:nosplit 501 func badctxt() { 502 throw("ctxt != 0") 503 } 504 505 func lockedOSThread() bool { 506 gp := getg() 507 return gp.lockedm != 0 && gp.m.lockedg != 0 508 } 509 510 var ( 511 // allgs contains all Gs ever created (including dead Gs), and thus 512 // never shrinks. 513 // 514 // Access via the slice is protected by allglock or stop-the-world. 515 // Readers that cannot take the lock may (carefully!) use the atomic 516 // variables below. 517 allglock mutex 518 allgs []*g 519 520 // allglen and allgptr are atomic variables that contain len(allgs) and 521 // &allgs[0] respectively. Proper ordering depends on totally-ordered 522 // loads and stores. Writes are protected by allglock. 523 // 524 // allgptr is updated before allglen. Readers should read allglen 525 // before allgptr to ensure that allglen is always <= len(allgptr). New 526 // Gs appended during the race can be missed. For a consistent view of 527 // all Gs, allglock must be held. 528 // 529 // allgptr copies should always be stored as a concrete type or 530 // unsafe.Pointer, not uintptr, to ensure that GC can still reach it 531 // even if it points to a stale array. 532 allglen uintptr 533 allgptr **g 534 ) 535 536 func allgadd(gp *g) { 537 if readgstatus(gp) == _Gidle { 538 throw("allgadd: bad status Gidle") 539 } 540 541 lock(&allglock) 542 allgs = append(allgs, gp) 543 if &allgs[0] != allgptr { 544 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0])) 545 } 546 atomic.Storeuintptr(&allglen, uintptr(len(allgs))) 547 unlock(&allglock) 548 } 549 550 // allGsSnapshot returns a snapshot of the slice of all Gs. 551 // 552 // The world must be stopped or allglock must be held. 553 func allGsSnapshot() []*g { 554 assertWorldStoppedOrLockHeld(&allglock) 555 556 // Because the world is stopped or allglock is held, allgadd 557 // cannot happen concurrently with this. allgs grows 558 // monotonically and existing entries never change, so we can 559 // simply return a copy of the slice header. For added safety, 560 // we trim everything past len because that can still change. 561 return allgs[:len(allgs):len(allgs)] 562 } 563 564 // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex. 
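// Illustrative use (this is essentially what forEachGRace below does): take a
// lock-free snapshot and index into it, accepting that Gs added after the
// snapshot may be missed, per the allglen/allgptr ordering rules above.
//
//	ptr, n := atomicAllG()
//	for i := uintptr(0); i < n; i++ {
//		gp := atomicAllGIndex(ptr, i)
//		_ = gp // inspect gp here
//	}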
565 func atomicAllG() (**g, uintptr) { 566 length := atomic.Loaduintptr(&allglen) 567 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr))) 568 return ptr, length 569 } 570 571 // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG. 572 func atomicAllGIndex(ptr **g, i uintptr) *g { 573 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize)) 574 } 575 576 // forEachG calls fn on every G from allgs. 577 // 578 // forEachG takes a lock to exclude concurrent addition of new Gs. 579 func forEachG(fn func(gp *g)) { 580 lock(&allglock) 581 for _, gp := range allgs { 582 fn(gp) 583 } 584 unlock(&allglock) 585 } 586 587 // forEachGRace calls fn on every G from allgs. 588 // 589 // forEachGRace avoids locking, but does not exclude addition of new Gs during 590 // execution, which may be missed. 591 func forEachGRace(fn func(gp *g)) { 592 ptr, length := atomicAllG() 593 for i := uintptr(0); i < length; i++ { 594 gp := atomicAllGIndex(ptr, i) 595 fn(gp) 596 } 597 return 598 } 599 600 const ( 601 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 602 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 603 _GoidCacheBatch = 16 604 ) 605 606 // cpuinit extracts the environment variable GODEBUG from the environment on 607 // Unix-like operating systems and calls internal/cpu.Initialize. 608 func cpuinit() { 609 const prefix = "GODEBUG=" 610 var env string 611 612 switch GOOS { 613 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux": 614 cpu.DebugOptions = true 615 616 // Similar to goenv_unix but extracts the environment value for 617 // GODEBUG directly. 618 // TODO(moehrmann): remove when general goenvs() can be called before cpuinit() 619 n := int32(0) 620 for argv_index(argv, argc+1+n) != nil { 621 n++ 622 } 623 624 for i := int32(0); i < n; i++ { 625 p := argv_index(argv, argc+1+i) 626 s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)})) 627 628 if hasPrefix(s, prefix) { 629 env = gostring(p)[len(prefix):] 630 break 631 } 632 } 633 } 634 635 cpu.Initialize(env) 636 637 // Support cpu feature variables are used in code generated by the compiler 638 // to guard execution of instructions that can not be assumed to be always supported. 639 switch GOARCH { 640 case "386", "amd64": 641 x86HasPOPCNT = cpu.X86.HasPOPCNT 642 x86HasSSE41 = cpu.X86.HasSSE41 643 x86HasFMA = cpu.X86.HasFMA 644 645 case "arm": 646 armHasVFPv4 = cpu.ARM.HasVFPv4 647 648 case "arm64": 649 arm64HasATOMICS = cpu.ARM64.HasATOMICS 650 } 651 } 652 653 // The bootstrap sequence is: 654 // 655 // call osinit 656 // call schedinit 657 // make & queue new G 658 // call runtime·mstart 659 // 660 // The new G calls runtime·main. 661 func schedinit() { 662 lockInit(&sched.lock, lockRankSched) 663 lockInit(&sched.sysmonlock, lockRankSysmon) 664 lockInit(&sched.deferlock, lockRankDefer) 665 lockInit(&sched.sudoglock, lockRankSudog) 666 lockInit(&deadlock, lockRankDeadlock) 667 lockInit(&paniclk, lockRankPanic) 668 lockInit(&allglock, lockRankAllg) 669 lockInit(&allpLock, lockRankAllp) 670 lockInit(&reflectOffs.lock, lockRankReflectOffs) 671 lockInit(&finlock, lockRankFin) 672 lockInit(&trace.bufLock, lockRankTraceBuf) 673 lockInit(&trace.stringsLock, lockRankTraceStrings) 674 lockInit(&trace.lock, lockRankTrace) 675 lockInit(&cpuprof.lock, lockRankCpuprof) 676 lockInit(&trace.stackTab.lock, lockRankTraceStackTab) 677 // Enforce that this lock is always a leaf lock. 
678 // All of this lock's critical sections should be 679 // extremely short. 680 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank) 681 682 // raceinit must be the first call to race detector. 683 // In particular, it must be done before mallocinit below calls racemapshadow. 684 _g_ := getg() 685 if raceenabled { 686 _g_.racectx, raceprocctx0 = raceinit() 687 } 688 689 sched.maxmcount = 10000 690 691 // The world starts stopped. 692 worldStopped() 693 694 moduledataverify() 695 stackinit() 696 mallocinit() 697 cpuinit() // must run before alginit 698 alginit() // maps, hash, fastrand must not be used before this call 699 fastrandinit() // must run before mcommoninit 700 mcommoninit(_g_.m, -1) 701 modulesinit() // provides activeModules 702 typelinksinit() // uses maps, activeModules 703 itabsinit() // uses activeModules 704 stkobjinit() // must run before GC starts 705 706 sigsave(&_g_.m.sigmask) 707 initSigmask = _g_.m.sigmask 708 709 if offset := unsafe.Offsetof(sched.timeToRun); offset%8 != 0 { 710 println(offset) 711 throw("sched.timeToRun not aligned to 8 bytes") 712 } 713 714 goargs() 715 goenvs() 716 parsedebugvars() 717 gcinit() 718 719 lock(&sched.lock) 720 sched.lastpoll = uint64(nanotime()) 721 procs := ncpu 722 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { 723 procs = n 724 } 725 if procresize(procs) != nil { 726 throw("unknown runnable goroutine during bootstrap") 727 } 728 unlock(&sched.lock) 729 730 // World is effectively started now, as P's can run. 731 worldStarted() 732 733 // For cgocheck > 1, we turn on the write barrier at all times 734 // and check all pointer writes. We can't do this until after 735 // procresize because the write barrier needs a P. 736 if debug.cgocheck > 1 { 737 writeBarrier.cgo = true 738 writeBarrier.enabled = true 739 for _, p := range allp { 740 p.wbBuf.reset() 741 } 742 } 743 744 if buildVersion == "" { 745 // Condition should never trigger. This code just serves 746 // to ensure runtime·buildVersion is kept in the resulting binary. 747 buildVersion = "unknown" 748 } 749 if len(modinfo) == 1 { 750 // Condition should never trigger. This code just serves 751 // to ensure runtime·modinfo is kept in the resulting binary. 752 modinfo = "" 753 } 754 } 755 756 func dumpgstatus(gp *g) { 757 _g_ := getg() 758 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 759 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 760 } 761 762 // sched.lock must be held. 763 func checkmcount() { 764 assertLockHeld(&sched.lock) 765 766 if mcount() > sched.maxmcount { 767 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 768 throw("thread exhaustion") 769 } 770 } 771 772 // mReserveID returns the next ID to use for a new m. This new m is immediately 773 // considered 'running' by checkdead. 774 // 775 // sched.lock must be held. 776 func mReserveID() int64 { 777 assertLockHeld(&sched.lock) 778 779 if sched.mnext+1 < sched.mnext { 780 throw("runtime: thread ID overflow") 781 } 782 id := sched.mnext 783 sched.mnext++ 784 checkmcount() 785 return id 786 } 787 788 // Pre-allocated ID may be passed as 'id', or omitted by passing -1. 789 func mcommoninit(mp *m, id int64) { 790 _g_ := getg() 791 792 // g0 stack won't make sense for user (and is not necessary unwindable). 
793 if _g_ != _g_.m.g0 { 794 callers(1, mp.createstack[:]) 795 } 796 797 lock(&sched.lock) 798 799 if id >= 0 { 800 mp.id = id 801 } else { 802 mp.id = mReserveID() 803 } 804 805 // cputicks is not very random in startup virtual machine 806 mp.fastrand = uint64(int64Hash(uint64(mp.id), fastrandseed^uintptr(cputicks()))) 807 808 mpreinit(mp) 809 if mp.gsignal != nil { 810 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 811 } 812 813 // Add to allm so garbage collector doesn't free g->m 814 // when it is just in a register or thread-local storage. 815 mp.alllink = allm 816 817 // NumCgoCall() iterates over allm w/o schedlock, 818 // so we need to publish it safely. 819 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 820 unlock(&sched.lock) 821 822 // Allocate memory to hold a cgo traceback if the cgo call crashes. 823 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" { 824 mp.cgoCallers = new(cgoCallers) 825 } 826 } 827 828 var fastrandseed uintptr 829 830 func fastrandinit() { 831 s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:] 832 getRandomData(s) 833 } 834 835 // Mark gp ready to run. 836 func ready(gp *g, traceskip int, next bool) { 837 if trace.enabled { 838 traceGoUnpark(gp, traceskip) 839 } 840 841 status := readgstatus(gp) 842 843 // Mark runnable. 844 _g_ := getg() 845 mp := acquirem() // disable preemption because it can be holding p in a local var 846 if status&^_Gscan != _Gwaiting { 847 dumpgstatus(gp) 848 throw("bad g->status in ready") 849 } 850 851 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 852 casgstatus(gp, _Gwaiting, _Grunnable) 853 runqput(_g_.m.p.ptr(), gp, next) 854 wakep() 855 releasem(mp) 856 } 857 858 // freezeStopWait is a large value that freezetheworld sets 859 // sched.stopwait to in order to request that all Gs permanently stop. 860 const freezeStopWait = 0x7fffffff 861 862 // freezing is set to non-zero if the runtime is trying to freeze the 863 // world. 864 var freezing uint32 865 866 // Similar to stopTheWorld but best-effort and can be called several times. 867 // There is no reverse operation, used during crashing. 868 // This function must not lock any mutexes. 869 func freezetheworld() { 870 atomic.Store(&freezing, 1) 871 // stopwait and preemption requests can be lost 872 // due to races with concurrently executing threads, 873 // so try several times 874 for i := 0; i < 5; i++ { 875 // this should tell the scheduler to not start any new goroutines 876 sched.stopwait = freezeStopWait 877 atomic.Store(&sched.gcwaiting, 1) 878 // this should stop running goroutines 879 if !preemptall() { 880 break // no running goroutines 881 } 882 usleep(1000) 883 } 884 // to be sure 885 usleep(1000) 886 preemptall() 887 usleep(1000) 888 } 889 890 // All reads and writes of g's status go through readgstatus, casgstatus 891 // castogscanstatus, casfrom_Gscanstatus. 892 //go:nosplit 893 func readgstatus(gp *g) uint32 { 894 return atomic.Load(&gp.atomicstatus) 895 } 896 897 // The Gscanstatuses are acting like locks and this releases them. 898 // If it proves to be a performance hit we should be able to make these 899 // simple atomic stores but for now we are going to throw if 900 // we see an inconsistent state. 901 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 902 success := false 903 904 // Check that transition is valid. 
905 switch oldval { 906 default: 907 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 908 dumpgstatus(gp) 909 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 910 case _Gscanrunnable, 911 _Gscanwaiting, 912 _Gscanrunning, 913 _Gscansyscall, 914 _Gscanpreempted: 915 if newval == oldval&^_Gscan { 916 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 917 } 918 } 919 if !success { 920 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 921 dumpgstatus(gp) 922 throw("casfrom_Gscanstatus: gp->status is not in scan state") 923 } 924 releaseLockRank(lockRankGscan) 925 } 926 927 // This will return false if the gp is not in the expected status and the cas fails. 928 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 929 func castogscanstatus(gp *g, oldval, newval uint32) bool { 930 switch oldval { 931 case _Grunnable, 932 _Grunning, 933 _Gwaiting, 934 _Gsyscall: 935 if newval == oldval|_Gscan { 936 r := atomic.Cas(&gp.atomicstatus, oldval, newval) 937 if r { 938 acquireLockRank(lockRankGscan) 939 } 940 return r 941 942 } 943 } 944 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 945 throw("castogscanstatus") 946 panic("not reached") 947 } 948 949 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 950 // and casfrom_Gscanstatus instead. 951 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 952 // put it in the Gscan state is finished. 953 //go:nosplit 954 func casgstatus(gp *g, oldval, newval uint32) { 955 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 956 systemstack(func() { 957 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 958 throw("casgstatus: bad incoming values") 959 }) 960 } 961 962 acquireLockRank(lockRankGscan) 963 releaseLockRank(lockRankGscan) 964 965 // See https://golang.org/cl/21503 for justification of the yield delay. 966 const yieldDelay = 5 * 1000 967 var nextYield int64 968 969 // loop if gp->atomicstatus is in a scan state giving 970 // GC time to finish and change the state to oldval. 971 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { 972 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 973 throw("casgstatus: waiting for Gwaiting but is Grunnable") 974 } 975 if i == 0 { 976 nextYield = nanotime() + yieldDelay 977 } 978 if nanotime() < nextYield { 979 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { 980 procyield(1) 981 } 982 } else { 983 osyield() 984 nextYield = nanotime() + yieldDelay/2 985 } 986 } 987 988 // Handle tracking for scheduling latencies. 989 if oldval == _Grunning { 990 // Track every 8th time a goroutine transitions out of running. 991 if gp.trackingSeq%gTrackingPeriod == 0 { 992 gp.tracking = true 993 } 994 gp.trackingSeq++ 995 } 996 if gp.tracking { 997 if oldval == _Grunnable { 998 // We transitioned out of runnable, so measure how much 999 // time we spent in this state and add it to 1000 // runnableTime. 1001 now := nanotime() 1002 gp.runnableTime += now - gp.runnableStamp 1003 gp.runnableStamp = 0 1004 } 1005 if newval == _Grunnable { 1006 // We just transitioned into runnable, so record what 1007 // time that happened. 
1008 now := nanotime() 1009 gp.runnableStamp = now 1010 } else if newval == _Grunning { 1011 // We're transitioning into running, so turn off 1012 // tracking and record how much time we spent in 1013 // runnable. 1014 gp.tracking = false 1015 sched.timeToRun.record(gp.runnableTime) 1016 gp.runnableTime = 0 1017 } 1018 } 1019 } 1020 1021 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 1022 // Returns old status. Cannot call casgstatus directly, because we are racing with an 1023 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 1024 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 1025 // it would loop waiting for the status to go back to Gwaiting, which it never will. 1026 //go:nosplit 1027 func casgcopystack(gp *g) uint32 { 1028 for { 1029 oldstatus := readgstatus(gp) &^ _Gscan 1030 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 1031 throw("copystack: bad status, not Gwaiting or Grunnable") 1032 } 1033 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 1034 return oldstatus 1035 } 1036 } 1037 } 1038 1039 // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted. 1040 // 1041 // TODO(austin): This is the only status operation that both changes 1042 // the status and locks the _Gscan bit. Rethink this. 1043 func casGToPreemptScan(gp *g, old, new uint32) { 1044 if old != _Grunning || new != _Gscan|_Gpreempted { 1045 throw("bad g transition") 1046 } 1047 acquireLockRank(lockRankGscan) 1048 for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) { 1049 } 1050 } 1051 1052 // casGFromPreempted attempts to transition gp from _Gpreempted to 1053 // _Gwaiting. If successful, the caller is responsible for 1054 // re-scheduling gp. 1055 func casGFromPreempted(gp *g, old, new uint32) bool { 1056 if old != _Gpreempted || new != _Gwaiting { 1057 throw("bad g transition") 1058 } 1059 return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting) 1060 } 1061 1062 // stopTheWorld stops all P's from executing goroutines, interrupting 1063 // all goroutines at GC safe points and records reason as the reason 1064 // for the stop. On return, only the current goroutine's P is running. 1065 // stopTheWorld must not be called from a system stack and the caller 1066 // must not hold worldsema. The caller must call startTheWorld when 1067 // other P's should resume execution. 1068 // 1069 // stopTheWorld is safe for multiple goroutines to call at the 1070 // same time. Each will execute its own stop, and the stops will 1071 // be serialized. 1072 // 1073 // This is also used by routines that do stack dumps. If the system is 1074 // in panic or being exited, this may not reliably stop all 1075 // goroutines. 1076 func stopTheWorld(reason string) { 1077 semacquire(&worldsema) 1078 gp := getg() 1079 gp.m.preemptoff = reason 1080 systemstack(func() { 1081 // Mark the goroutine which called stopTheWorld preemptible so its 1082 // stack may be scanned. 1083 // This lets a mark worker scan us while we try to stop the world 1084 // since otherwise we could get in a mutual preemption deadlock. 1085 // We must not modify anything on the G stack because a stack shrink 1086 // may occur. 
A stack shrink is otherwise OK though because in order 1087 // to return from this function (and to leave the system stack) we 1088 // must have preempted all goroutines, including any attempting 1089 // to scan our stack, in which case, any stack shrinking will 1090 // have already completed by the time we exit. 1091 casgstatus(gp, _Grunning, _Gwaiting) 1092 stopTheWorldWithSema() 1093 casgstatus(gp, _Gwaiting, _Grunning) 1094 }) 1095 } 1096 1097 // startTheWorld undoes the effects of stopTheWorld. 1098 func startTheWorld() { 1099 systemstack(func() { startTheWorldWithSema(false) }) 1100 1101 // worldsema must be held over startTheWorldWithSema to ensure 1102 // gomaxprocs cannot change while worldsema is held. 1103 // 1104 // Release worldsema with direct handoff to the next waiter, but 1105 // acquirem so that semrelease1 doesn't try to yield our time. 1106 // 1107 // Otherwise if e.g. ReadMemStats is being called in a loop, 1108 // it might stomp on other attempts to stop the world, such as 1109 // for starting or ending GC. The operation this blocks is 1110 // so heavy-weight that we should just try to be as fair as 1111 // possible here. 1112 // 1113 // We don't want to just allow us to get preempted between now 1114 // and releasing the semaphore because then we keep everyone 1115 // (including, for example, GCs) waiting longer. 1116 mp := acquirem() 1117 mp.preemptoff = "" 1118 semrelease1(&worldsema, true, 0) 1119 releasem(mp) 1120 } 1121 1122 // stopTheWorldGC has the same effect as stopTheWorld, but blocks 1123 // until the GC is not running. It also blocks a GC from starting 1124 // until startTheWorldGC is called. 1125 func stopTheWorldGC(reason string) { 1126 semacquire(&gcsema) 1127 stopTheWorld(reason) 1128 } 1129 1130 // startTheWorldGC undoes the effects of stopTheWorldGC. 1131 func startTheWorldGC() { 1132 startTheWorld() 1133 semrelease(&gcsema) 1134 } 1135 1136 // Holding worldsema grants an M the right to try to stop the world. 1137 var worldsema uint32 = 1 1138 1139 // Holding gcsema grants the M the right to block a GC, and blocks 1140 // until the current GC is done. In particular, it prevents gomaxprocs 1141 // from changing concurrently. 1142 // 1143 // TODO(mknyszek): Once gomaxprocs and the execution tracer can handle 1144 // being changed/enabled during a GC, remove this. 1145 var gcsema uint32 = 1 1146 1147 // stopTheWorldWithSema is the core implementation of stopTheWorld. 1148 // The caller is responsible for acquiring worldsema and disabling 1149 // preemption first and then should stopTheWorldWithSema on the system 1150 // stack: 1151 // 1152 // semacquire(&worldsema, 0) 1153 // m.preemptoff = "reason" 1154 // systemstack(stopTheWorldWithSema) 1155 // 1156 // When finished, the caller must either call startTheWorld or undo 1157 // these three operations separately: 1158 // 1159 // m.preemptoff = "" 1160 // systemstack(startTheWorldWithSema) 1161 // semrelease(&worldsema) 1162 // 1163 // It is allowed to acquire worldsema once and then execute multiple 1164 // startTheWorldWithSema/stopTheWorldWithSema pairs. 1165 // Other P's are able to execute between successive calls to 1166 // startTheWorldWithSema and stopTheWorldWithSema. 1167 // Holding worldsema causes any other goroutines invoking 1168 // stopTheWorld to block. 1169 func stopTheWorldWithSema() { 1170 _g_ := getg() 1171 1172 // If we hold a lock, then we won't be able to stop another M 1173 // that is blocked trying to acquire the lock. 
1174 if _g_.m.locks > 0 { 1175 throw("stopTheWorld: holding locks") 1176 } 1177 1178 lock(&sched.lock) 1179 sched.stopwait = gomaxprocs 1180 atomic.Store(&sched.gcwaiting, 1) 1181 preemptall() 1182 // stop current P 1183 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 1184 sched.stopwait-- 1185 // try to retake all P's in Psyscall status 1186 for _, p := range allp { 1187 s := p.status 1188 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 1189 if trace.enabled { 1190 traceGoSysBlock(p) 1191 traceProcStop(p) 1192 } 1193 p.syscalltick++ 1194 sched.stopwait-- 1195 } 1196 } 1197 // stop idle P's 1198 for { 1199 p := pidleget() 1200 if p == nil { 1201 break 1202 } 1203 p.status = _Pgcstop 1204 sched.stopwait-- 1205 } 1206 wait := sched.stopwait > 0 1207 unlock(&sched.lock) 1208 1209 // wait for remaining P's to stop voluntarily 1210 if wait { 1211 for { 1212 // wait for 100us, then try to re-preempt in case of any races 1213 if notetsleep(&sched.stopnote, 100*1000) { 1214 noteclear(&sched.stopnote) 1215 break 1216 } 1217 preemptall() 1218 } 1219 } 1220 1221 // sanity checks 1222 bad := "" 1223 if sched.stopwait != 0 { 1224 bad = "stopTheWorld: not stopped (stopwait != 0)" 1225 } else { 1226 for _, p := range allp { 1227 if p.status != _Pgcstop { 1228 bad = "stopTheWorld: not stopped (status != _Pgcstop)" 1229 } 1230 } 1231 } 1232 if atomic.Load(&freezing) != 0 { 1233 // Some other thread is panicking. This can cause the 1234 // sanity checks above to fail if the panic happens in 1235 // the signal handler on a stopped thread. Either way, 1236 // we should halt this thread. 1237 lock(&deadlock) 1238 lock(&deadlock) 1239 } 1240 if bad != "" { 1241 throw(bad) 1242 } 1243 1244 worldStopped() 1245 } 1246 1247 func startTheWorldWithSema(emitTraceEvent bool) int64 { 1248 assertWorldStopped() 1249 1250 mp := acquirem() // disable preemption because it can be holding p in a local var 1251 if netpollinited() { 1252 list := netpoll(0) // non-blocking 1253 injectglist(&list) 1254 } 1255 lock(&sched.lock) 1256 1257 procs := gomaxprocs 1258 if newprocs != 0 { 1259 procs = newprocs 1260 newprocs = 0 1261 } 1262 p1 := procresize(procs) 1263 sched.gcwaiting = 0 1264 if sched.sysmonwait != 0 { 1265 sched.sysmonwait = 0 1266 notewakeup(&sched.sysmonnote) 1267 } 1268 unlock(&sched.lock) 1269 1270 worldStarted() 1271 1272 for p1 != nil { 1273 p := p1 1274 p1 = p1.link.ptr() 1275 if p.m != 0 { 1276 mp := p.m.ptr() 1277 p.m = 0 1278 if mp.nextp != 0 { 1279 throw("startTheWorld: inconsistent mp->nextp") 1280 } 1281 mp.nextp.set(p) 1282 notewakeup(&mp.park) 1283 } else { 1284 // Start M to run P. Do not start another M below. 1285 newm(nil, p, -1) 1286 } 1287 } 1288 1289 // Capture start-the-world time before doing clean-up tasks. 1290 startTime := nanotime() 1291 if emitTraceEvent { 1292 traceGCSTWDone() 1293 } 1294 1295 // Wakeup an additional proc in case we have excessive runnable goroutines 1296 // in local queues or in the global queue. If we don't, the proc will park itself. 1297 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1298 wakep() 1299 1300 releasem(mp) 1301 1302 return startTime 1303 } 1304 1305 // usesLibcall indicates whether this runtime performs system calls 1306 // via libcall. 
1307 func usesLibcall() bool { 1308 switch GOOS { 1309 case "aix", "darwin", "illumos", "ios", "solaris", "windows": 1310 return true 1311 case "openbsd": 1312 return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" 1313 } 1314 return false 1315 } 1316 1317 // mStackIsSystemAllocated indicates whether this runtime starts on a 1318 // system-allocated stack. 1319 func mStackIsSystemAllocated() bool { 1320 switch GOOS { 1321 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows": 1322 return true 1323 case "openbsd": 1324 switch GOARCH { 1325 case "386", "amd64", "arm", "arm64": 1326 return true 1327 } 1328 } 1329 return false 1330 } 1331 1332 // mstart is the entry-point for new Ms. 1333 // It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0. 1334 func mstart() 1335 1336 // mstart0 is the Go entry-point for new Ms. 1337 // This must not split the stack because we may not even have stack 1338 // bounds set up yet. 1339 // 1340 // May run during STW (because it doesn't have a P yet), so write 1341 // barriers are not allowed. 1342 // 1343 //go:nosplit 1344 //go:nowritebarrierrec 1345 func mstart0() { 1346 _g_ := getg() 1347 1348 osStack := _g_.stack.lo == 0 1349 if osStack { 1350 // Initialize stack bounds from system stack. 1351 // Cgo may have left stack size in stack.hi. 1352 // minit may update the stack bounds. 1353 // 1354 // Note: these bounds may not be very accurate. 1355 // We set hi to &size, but there are things above 1356 // it. The 1024 is supposed to compensate this, 1357 // but is somewhat arbitrary. 1358 size := _g_.stack.hi 1359 if size == 0 { 1360 size = 8192 * sys.StackGuardMultiplier 1361 } 1362 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1363 _g_.stack.lo = _g_.stack.hi - size + 1024 1364 } 1365 // Initialize stack guard so that we can start calling regular 1366 // Go code. 1367 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1368 // This is the g0, so we can also call go:systemstack 1369 // functions, which check stackguard1. 1370 _g_.stackguard1 = _g_.stackguard0 1371 mstart1() 1372 1373 // Exit this thread. 1374 if mStackIsSystemAllocated() { 1375 // Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate 1376 // the stack, but put it in _g_.stack before mstart, 1377 // so the logic above hasn't set osStack yet. 1378 osStack = true 1379 } 1380 mexit(osStack) 1381 } 1382 1383 // The go:noinline is to guarantee the getcallerpc/getcallersp below are safe, 1384 // so that we can set up g0.sched to return to the call of mstart1 above. 1385 //go:noinline 1386 func mstart1() { 1387 _g_ := getg() 1388 1389 if _g_ != _g_.m.g0 { 1390 throw("bad runtime·mstart") 1391 } 1392 1393 // Set up m.g0.sched as a label returning to just 1394 // after the mstart1 call in mstart0 above, for use by goexit0 and mcall. 1395 // We're never coming back to mstart1 after we call schedule, 1396 // so other calls can reuse the current frame. 1397 // And goexit0 does a gogo that needs to return from mstart1 1398 // and let mstart0 exit the thread. 1399 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 1400 _g_.sched.pc = getcallerpc() 1401 _g_.sched.sp = getcallersp() 1402 1403 asminit() 1404 minit() 1405 1406 // Install signal handlers; after minit so that minit can 1407 // prepare the thread to be able to handle the signals. 
1408 if _g_.m == &m0 { 1409 mstartm0() 1410 } 1411 1412 if fn := _g_.m.mstartfn; fn != nil { 1413 fn() 1414 } 1415 1416 if _g_.m != &m0 { 1417 acquirep(_g_.m.nextp.ptr()) 1418 _g_.m.nextp = 0 1419 } 1420 schedule() 1421 } 1422 1423 // mstartm0 implements part of mstart1 that only runs on the m0. 1424 // 1425 // Write barriers are allowed here because we know the GC can't be 1426 // running yet, so they'll be no-ops. 1427 // 1428 //go:yeswritebarrierrec 1429 func mstartm0() { 1430 // Create an extra M for callbacks on threads not created by Go. 1431 // An extra M is also needed on Windows for callbacks created by 1432 // syscall.NewCallback. See issue #6751 for details. 1433 if (iscgo || GOOS == "windows") && !cgoHasExtraM { 1434 cgoHasExtraM = true 1435 newextram() 1436 } 1437 initsig(false) 1438 } 1439 1440 // mPark causes a thread to park itself - temporarily waking for 1441 // fixups but otherwise waiting to be fully woken. This is the 1442 // only way that m's should park themselves. 1443 //go:nosplit 1444 func mPark() { 1445 g := getg() 1446 for { 1447 notesleep(&g.m.park) 1448 // Note, because of signal handling by this parked m, 1449 // a preemptive mDoFixup() may actually occur via 1450 // mDoFixupAndOSYield(). (See golang.org/issue/44193) 1451 noteclear(&g.m.park) 1452 if !mDoFixup() { 1453 return 1454 } 1455 } 1456 } 1457 1458 // mexit tears down and exits the current thread. 1459 // 1460 // Don't call this directly to exit the thread, since it must run at 1461 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to 1462 // unwind the stack to the point that exits the thread. 1463 // 1464 // It is entered with m.p != nil, so write barriers are allowed. It 1465 // will release the P before exiting. 1466 // 1467 //go:yeswritebarrierrec 1468 func mexit(osStack bool) { 1469 g := getg() 1470 m := g.m 1471 1472 if m == &m0 { 1473 // This is the main thread. Just wedge it. 1474 // 1475 // On Linux, exiting the main thread puts the process 1476 // into a non-waitable zombie state. On Plan 9, 1477 // exiting the main thread unblocks wait even though 1478 // other threads are still running. On Solaris we can 1479 // neither exitThread nor return from mstart. Other 1480 // bad things probably happen on other platforms. 1481 // 1482 // We could try to clean up this M more before wedging 1483 // it, but that complicates signal handling. 1484 handoffp(releasep()) 1485 lock(&sched.lock) 1486 sched.nmfreed++ 1487 checkdead() 1488 unlock(&sched.lock) 1489 mPark() 1490 throw("locked m0 woke up") 1491 } 1492 1493 sigblock(true) 1494 unminit() 1495 1496 // Free the gsignal stack. 1497 if m.gsignal != nil { 1498 stackfree(m.gsignal.stack) 1499 // On some platforms, when calling into VDSO (e.g. nanotime) 1500 // we store our g on the gsignal stack, if there is one. 1501 // Now the stack is freed, unlink it from the m, so we 1502 // won't write to it when calling VDSO code. 1503 m.gsignal = nil 1504 } 1505 1506 // Remove m from allm. 1507 lock(&sched.lock) 1508 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { 1509 if *pprev == m { 1510 *pprev = m.alllink 1511 goto found 1512 } 1513 } 1514 throw("m not found in allm") 1515 found: 1516 if !osStack { 1517 // Delay reaping m until it's done with the stack. 1518 // 1519 // If this is using an OS stack, the OS will free it 1520 // so there's no need for reaping. 1521 atomic.Store(&m.freeWait, 1) 1522 // Put m on the free list, though it will not be reaped until 1523 // freeWait is 0. 
Note that the free list must not be linked 1524 // through alllink because some functions walk allm without 1525 // locking, so may be using alllink. 1526 m.freelink = sched.freem 1527 sched.freem = m 1528 } 1529 unlock(&sched.lock) 1530 1531 atomic.Xadd64(&ncgocall, int64(m.ncgocall)) 1532 1533 // Release the P. 1534 handoffp(releasep()) 1535 // After this point we must not have write barriers. 1536 1537 // Invoke the deadlock detector. This must happen after 1538 // handoffp because it may have started a new M to take our 1539 // P's work. 1540 lock(&sched.lock) 1541 sched.nmfreed++ 1542 checkdead() 1543 unlock(&sched.lock) 1544 1545 if GOOS == "darwin" || GOOS == "ios" { 1546 // Make sure pendingPreemptSignals is correct when an M exits. 1547 // For #41702. 1548 if atomic.Load(&m.signalPending) != 0 { 1549 atomic.Xadd(&pendingPreemptSignals, -1) 1550 } 1551 } 1552 1553 // Destroy all allocated resources. After this is called, we may no 1554 // longer take any locks. 1555 mdestroy(m) 1556 1557 if osStack { 1558 // Return from mstart and let the system thread 1559 // library free the g0 stack and terminate the thread. 1560 return 1561 } 1562 1563 // mstart is the thread's entry point, so there's nothing to 1564 // return to. Exit the thread directly. exitThread will clear 1565 // m.freeWait when it's done with the stack and the m can be 1566 // reaped. 1567 exitThread(&m.freeWait) 1568 } 1569 1570 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1571 // If a P is currently executing code, this will bring the P to a GC 1572 // safe point and execute fn on that P. If the P is not executing code 1573 // (it is idle or in a syscall), this will call fn(p) directly while 1574 // preventing the P from exiting its state. This does not ensure that 1575 // fn will run on every CPU executing Go code, but it acts as a global 1576 // memory barrier. GC uses this as a "ragged barrier." 1577 // 1578 // The caller must hold worldsema. 1579 // 1580 //go:systemstack 1581 func forEachP(fn func(*p)) { 1582 mp := acquirem() 1583 _p_ := getg().m.p.ptr() 1584 1585 lock(&sched.lock) 1586 if sched.safePointWait != 0 { 1587 throw("forEachP: sched.safePointWait != 0") 1588 } 1589 sched.safePointWait = gomaxprocs - 1 1590 sched.safePointFn = fn 1591 1592 // Ask all Ps to run the safe point function. 1593 for _, p := range allp { 1594 if p != _p_ { 1595 atomic.Store(&p.runSafePointFn, 1) 1596 } 1597 } 1598 preemptall() 1599 1600 // Any P entering _Pidle or _Psyscall from now on will observe 1601 // p.runSafePointFn == 1 and will call runSafePointFn when 1602 // changing its status to _Pidle/_Psyscall. 1603 1604 // Run safe point function for all idle Ps. sched.pidle will 1605 // not change because we hold sched.lock. 1606 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1607 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1608 fn(p) 1609 sched.safePointWait-- 1610 } 1611 } 1612 1613 wait := sched.safePointWait > 0 1614 unlock(&sched.lock) 1615 1616 // Run fn for the current P. 1617 fn(_p_) 1618 1619 // Force Ps currently in _Psyscall into _Pidle and hand them 1620 // off to induce safe point function execution. 1621 for _, p := range allp { 1622 s := p.status 1623 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1624 if trace.enabled { 1625 traceGoSysBlock(p) 1626 traceProcStop(p) 1627 } 1628 p.syscalltick++ 1629 handoffp(p) 1630 } 1631 } 1632 1633 // Wait for remaining Ps to run fn. 
1634 if wait { 1635 for { 1636 // Wait for 100us, then try to re-preempt in 1637 // case of any races. 1638 // 1639 // Requires system stack. 1640 if notetsleep(&sched.safePointNote, 100*1000) { 1641 noteclear(&sched.safePointNote) 1642 break 1643 } 1644 preemptall() 1645 } 1646 } 1647 if sched.safePointWait != 0 { 1648 throw("forEachP: not done") 1649 } 1650 for _, p := range allp { 1651 if p.runSafePointFn != 0 { 1652 throw("forEachP: P did not run fn") 1653 } 1654 } 1655 1656 lock(&sched.lock) 1657 sched.safePointFn = nil 1658 unlock(&sched.lock) 1659 releasem(mp) 1660 } 1661 1662 // syscall_runtime_doAllThreadsSyscall serializes Go execution and 1663 // executes a specified fn() call on all m's. 1664 // 1665 // The boolean argument to fn() indicates whether the function's 1666 // return value will be consulted or not. That is, fn(true) should 1667 // return true if fn() succeeds, and fn(true) should return false if 1668 // it failed. When fn(false) is called, its return status will be 1669 // ignored. 1670 // 1671 // syscall_runtime_doAllThreadsSyscall first invokes fn(true) on a 1672 // single, coordinating, m, and only if it returns true does it go on 1673 // to invoke fn(false) on all of the other m's known to the process. 1674 // 1675 //go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall 1676 func syscall_runtime_doAllThreadsSyscall(fn func(bool) bool) { 1677 if iscgo { 1678 panic("doAllThreadsSyscall not supported with cgo enabled") 1679 } 1680 if fn == nil { 1681 return 1682 } 1683 for atomic.Load(&sched.sysmonStarting) != 0 { 1684 osyield() 1685 } 1686 1687 // We don't want this thread to handle signals for the 1688 // duration of this critical section. The underlying issue 1689 // being that this locked coordinating m is the one monitoring 1690 // for fn() execution by all the other m's of the runtime, 1691 // while no regular go code execution is permitted (the world 1692 // is stopped). If this present m were to get distracted to 1693 // run signal handling code, and find itself waiting for a 1694 // second thread to execute go code before being able to 1695 // return from that signal handling, a deadlock will result. 1696 // (See golang.org/issue/44193.) 1697 lockOSThread() 1698 var sigmask sigset 1699 sigsave(&sigmask) 1700 sigblock(false) 1701 1702 stopTheWorldGC("doAllThreadsSyscall") 1703 if atomic.Load(&newmHandoff.haveTemplateThread) != 0 { 1704 // Ensure that there are no in-flight thread 1705 // creations: don't want to race with allm. 1706 lock(&newmHandoff.lock) 1707 for !newmHandoff.waiting { 1708 unlock(&newmHandoff.lock) 1709 osyield() 1710 lock(&newmHandoff.lock) 1711 } 1712 unlock(&newmHandoff.lock) 1713 } 1714 if netpollinited() { 1715 netpollBreak() 1716 } 1717 sigRecvPrepareForFixup() 1718 _g_ := getg() 1719 if raceenabled { 1720 // For m's running without racectx, we loan out the 1721 // racectx of this call. 1722 lock(&mFixupRace.lock) 1723 mFixupRace.ctx = _g_.racectx 1724 unlock(&mFixupRace.lock) 1725 } 1726 if ok := fn(true); ok { 1727 tid := _g_.m.procid 1728 for mp := allm; mp != nil; mp = mp.alllink { 1729 if mp.procid == tid { 1730 // This m has already completed fn() 1731 // call. 1732 continue 1733 } 1734 // Be wary of mp's without procid values if 1735 // they are known not to park. 
If they are 1736 // marked as parking with a zero procid, then 1737 // they will be racing with this code to be 1738 // allocated a procid and we will annotate 1739 // them with the need to execute the fn when 1740 // they acquire a procid to run it. 1741 if mp.procid == 0 && !mp.doesPark { 1742 // Reaching here, we are either 1743 // running Windows, or cgo linked 1744 // code. Neither of which are 1745 // currently supported by this API. 1746 throw("unsupported runtime environment") 1747 } 1748 // stopTheWorldGC() doesn't guarantee stopping 1749 // all the threads, so we lock here to avoid 1750 // the possibility of racing with mp. 1751 lock(&mp.mFixup.lock) 1752 mp.mFixup.fn = fn 1753 atomic.Store(&mp.mFixup.used, 1) 1754 if mp.doesPark { 1755 // For non-service threads this will 1756 // cause the wakeup to be short lived 1757 // (once the mutex is unlocked). The 1758 // next real wakeup will occur after 1759 // startTheWorldGC() is called. 1760 notewakeup(&mp.park) 1761 } 1762 unlock(&mp.mFixup.lock) 1763 } 1764 for { 1765 done := true 1766 for mp := allm; done && mp != nil; mp = mp.alllink { 1767 if mp.procid == tid { 1768 continue 1769 } 1770 done = atomic.Load(&mp.mFixup.used) == 0 1771 } 1772 if done { 1773 break 1774 } 1775 // if needed force sysmon and/or newmHandoff to wakeup. 1776 lock(&sched.lock) 1777 if atomic.Load(&sched.sysmonwait) != 0 { 1778 atomic.Store(&sched.sysmonwait, 0) 1779 notewakeup(&sched.sysmonnote) 1780 } 1781 unlock(&sched.lock) 1782 lock(&newmHandoff.lock) 1783 if newmHandoff.waiting { 1784 newmHandoff.waiting = false 1785 notewakeup(&newmHandoff.wake) 1786 } 1787 unlock(&newmHandoff.lock) 1788 osyield() 1789 } 1790 } 1791 if raceenabled { 1792 lock(&mFixupRace.lock) 1793 mFixupRace.ctx = 0 1794 unlock(&mFixupRace.lock) 1795 } 1796 startTheWorldGC() 1797 msigrestore(sigmask) 1798 unlockOSThread() 1799 } 1800 1801 // runSafePointFn runs the safe point function, if any, for this P. 1802 // This should be called like 1803 // 1804 // if getg().m.p.runSafePointFn != 0 { 1805 // runSafePointFn() 1806 // } 1807 // 1808 // runSafePointFn must be checked on any transition in to _Pidle or 1809 // _Psyscall to avoid a race where forEachP sees that the P is running 1810 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1811 // nor the P run the safe-point function. 1812 func runSafePointFn() { 1813 p := getg().m.p.ptr() 1814 // Resolve the race between forEachP running the safe-point 1815 // function on this P's behalf and this P running the 1816 // safe-point function directly. 1817 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1818 return 1819 } 1820 sched.safePointFn(p) 1821 lock(&sched.lock) 1822 sched.safePointWait-- 1823 if sched.safePointWait == 0 { 1824 notewakeup(&sched.safePointNote) 1825 } 1826 unlock(&sched.lock) 1827 } 1828 1829 // When running with cgo, we call _cgo_thread_start 1830 // to start threads for us so that we can play nicely with 1831 // foreign code. 1832 var cgoThreadStart unsafe.Pointer 1833 1834 type cgothreadstart struct { 1835 g guintptr 1836 tls *uint64 1837 fn unsafe.Pointer 1838 } 1839 1840 // Allocate a new m unassociated with any thread. 1841 // Can use p for allocation context if needed. 1842 // fn is recorded as the new m's m.mstartfn. 1843 // id is optional pre-allocated m ID. Omit by passing -1. 1844 // 1845 // This function is allowed to have write barriers even if the caller 1846 // isn't because it borrows _p_. 
1847 // 1848 //go:yeswritebarrierrec 1849 func allocm(_p_ *p, fn func(), id int64) *m { 1850 _g_ := getg() 1851 acquirem() // disable GC because it can be called from sysmon 1852 if _g_.m.p == 0 { 1853 acquirep(_p_) // temporarily borrow p for mallocs in this function 1854 } 1855 1856 // Release the free M list. We need to do this somewhere and 1857 // this may free up a stack we can use. 1858 if sched.freem != nil { 1859 lock(&sched.lock) 1860 var newList *m 1861 for freem := sched.freem; freem != nil; { 1862 if freem.freeWait != 0 { 1863 next := freem.freelink 1864 freem.freelink = newList 1865 newList = freem 1866 freem = next 1867 continue 1868 } 1869 // stackfree must be on the system stack, but allocm is 1870 // reachable off the system stack transitively from 1871 // startm. 1872 systemstack(func() { 1873 stackfree(freem.g0.stack) 1874 }) 1875 freem = freem.freelink 1876 } 1877 sched.freem = newList 1878 unlock(&sched.lock) 1879 } 1880 1881 mp := new(m) 1882 mp.mstartfn = fn 1883 mcommoninit(mp, id) 1884 1885 // In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack. 1886 // Windows and Plan 9 will layout sched stack on OS stack. 1887 if iscgo || mStackIsSystemAllocated() { 1888 mp.g0 = malg(-1) 1889 } else { 1890 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1891 } 1892 mp.g0.m = mp 1893 1894 if _p_ == _g_.m.p.ptr() { 1895 releasep() 1896 } 1897 releasem(_g_.m) 1898 1899 return mp 1900 } 1901 1902 // needm is called when a cgo callback happens on a 1903 // thread without an m (a thread not created by Go). 1904 // In this case, needm is expected to find an m to use 1905 // and return with m, g initialized correctly. 1906 // Since m and g are not set now (likely nil, but see below) 1907 // needm is limited in what routines it can call. In particular 1908 // it can only call nosplit functions (textflag 7) and cannot 1909 // do any scheduling that requires an m. 1910 // 1911 // In order to avoid needing heavy lifting here, we adopt 1912 // the following strategy: there is a stack of available m's 1913 // that can be stolen. Using compare-and-swap 1914 // to pop from the stack has ABA races, so we simulate 1915 // a lock by doing an exchange (via Casuintptr) to steal the stack 1916 // head and replace the top pointer with MLOCKED (1). 1917 // This serves as a simple spin lock that we can use even 1918 // without an m. The thread that locks the stack in this way 1919 // unlocks the stack by storing a valid stack head pointer. 1920 // 1921 // In order to make sure that there is always an m structure 1922 // available to be stolen, we maintain the invariant that there 1923 // is always one more than needed. At the beginning of the 1924 // program (if cgo is in use) the list is seeded with a single m. 1925 // If needm finds that it has taken the last m off the list, its job 1926 // is - once it has installed its own m so that it can do things like 1927 // allocate memory - to create a spare m and put it on the list. 1928 // 1929 // Each of these extra m's also has a g0 and a curg that are 1930 // pressed into service as the scheduling stack and current 1931 // goroutine for the duration of the cgo callback. 1932 // 1933 // When the callback is done with the m, it calls dropm to 1934 // put the m back on the list. 1935 //go:nosplit 1936 func needm() { 1937 if (iscgo || GOOS == "windows") && !cgoHasExtraM { 1938 // Can happen if C/C++ code calls Go from a global ctor. 
1939 // Can also happen on Windows if a global ctor uses a 1940 // callback created by syscall.NewCallback. See issue #6751 1941 // for details. 1942 // 1943 // Can not throw, because scheduler is not initialized yet. 1944 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1945 exit(1) 1946 } 1947 1948 // Save and block signals before getting an M. 1949 // The signal handler may call needm itself, 1950 // and we must avoid a deadlock. Also, once g is installed, 1951 // any incoming signals will try to execute, 1952 // but we won't have the sigaltstack settings and other data 1953 // set up appropriately until the end of minit, which will 1954 // unblock the signals. This is the same dance as when 1955 // starting a new m to run Go code via newosproc. 1956 var sigmask sigset 1957 sigsave(&sigmask) 1958 sigblock(false) 1959 1960 // Lock extra list, take head, unlock popped list. 1961 // nilokay=false is safe here because of the invariant above, 1962 // that the extra list always contains or will soon contain 1963 // at least one m. 1964 mp := lockextra(false) 1965 1966 // Set needextram when we've just emptied the list, 1967 // so that the eventual call into cgocallbackg will 1968 // allocate a new m for the extra list. We delay the 1969 // allocation until then so that it can be done 1970 // after exitsyscall makes sure it is okay to be 1971 // running at all (that is, there's no garbage collection 1972 // running right now). 1973 mp.needextram = mp.schedlink == 0 1974 extraMCount-- 1975 unlockextra(mp.schedlink.ptr()) 1976 1977 // Store the original signal mask for use by minit. 1978 mp.sigmask = sigmask 1979 1980 // Install TLS on some platforms (previously setg 1981 // would do this if necessary). 1982 osSetupTLS(mp) 1983 1984 // Install g (= m->g0) and set the stack bounds 1985 // to match the current stack. We don't actually know 1986 // how big the stack is, like we don't know how big any 1987 // scheduling stack is, but we assume there's at least 32 kB, 1988 // which is more than enough for us. 1989 setg(mp.g0) 1990 _g_ := getg() 1991 _g_.stack.hi = getcallersp() + 1024 1992 _g_.stack.lo = getcallersp() - 32*1024 1993 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1994 1995 // Initialize this thread to use the m. 1996 asminit() 1997 minit() 1998 1999 // mp.curg is now a real goroutine. 2000 casgstatus(mp.curg, _Gdead, _Gsyscall) 2001 atomic.Xadd(&sched.ngsys, -1) 2002 } 2003 2004 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 2005 2006 // newextram allocates m's and puts them on the extra list. 2007 // It is called with a working local m, so that it can do things 2008 // like call schedlock and allocate. 2009 func newextram() { 2010 c := atomic.Xchg(&extraMWaiters, 0) 2011 if c > 0 { 2012 for i := uint32(0); i < c; i++ { 2013 oneNewExtraM() 2014 } 2015 } else { 2016 // Make sure there is at least one extra M. 2017 mp := lockextra(true) 2018 unlockextra(mp) 2019 if mp == nil { 2020 oneNewExtraM() 2021 } 2022 } 2023 } 2024 2025 // oneNewExtraM allocates an m and puts it on the extra list. 2026 func oneNewExtraM() { 2027 // Create extra goroutine locked to extra m. 2028 // The goroutine is the context in which the cgo callback will run. 2029 // The sched.pc will never be returned to, but setting it to 2030 // goexit makes clear to the traceback routines where 2031 // the goroutine stack ends. 
2032 mp := allocm(nil, nil, -1) 2033 gp := malg(4096) 2034 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum 2035 gp.sched.sp = gp.stack.hi 2036 gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame 2037 gp.sched.lr = 0 2038 gp.sched.g = guintptr(unsafe.Pointer(gp)) 2039 gp.syscallpc = gp.sched.pc 2040 gp.syscallsp = gp.sched.sp 2041 gp.stktopsp = gp.sched.sp 2042 // malg returns status as _Gidle. Change to _Gdead before 2043 // adding to allg where GC can see it. We use _Gdead to hide 2044 // this from tracebacks and stack scans since it isn't a 2045 // "real" goroutine until needm grabs it. 2046 casgstatus(gp, _Gidle, _Gdead) 2047 gp.m = mp 2048 mp.curg = gp 2049 mp.lockedInt++ 2050 mp.lockedg.set(gp) 2051 gp.lockedm.set(mp) 2052 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 2053 if raceenabled { 2054 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum) 2055 } 2056 // put on allg for garbage collector 2057 allgadd(gp) 2058 2059 // gp is now on the allg list, but we don't want it to be 2060 // counted by gcount. It would be more "proper" to increment 2061 // sched.ngfree, but that requires locking. Incrementing ngsys 2062 // has the same effect. 2063 atomic.Xadd(&sched.ngsys, +1) 2064 2065 // Add m to the extra list. 2066 mnext := lockextra(true) 2067 mp.schedlink.set(mnext) 2068 extraMCount++ 2069 unlockextra(mp) 2070 } 2071 2072 // dropm is called when a cgo callback has called needm but is now 2073 // done with the callback and returning back into the non-Go thread. 2074 // It puts the current m back onto the extra list. 2075 // 2076 // The main expense here is the call to signalstack to release the 2077 // m's signal stack, and then the call to needm on the next callback 2078 // from this thread. It is tempting to try to save the m for next time, 2079 // which would eliminate both these costs, but there might not be 2080 // a next time: the current thread (which Go does not control) might exit. 2081 // If we saved the m for that thread, there would be an m leak each time 2082 // such a thread exited. Instead, we acquire and release an m on each 2083 // call. These should typically not be scheduling operations, just a few 2084 // atomics, so the cost should be small. 2085 // 2086 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 2087 // variable using pthread_key_create. Unlike the pthread keys we already use 2088 // on OS X, this dummy key would never be read by Go code. It would exist 2089 // only so that we could register at thread-exit-time destructor. 2090 // That destructor would put the m back onto the extra list. 2091 // This is purely a performance optimization. The current version, 2092 // in which dropm happens on each cgo call, is still correct too. 2093 // We may have to keep the current version on systems with cgo 2094 // but without pthreads, like Windows. 2095 func dropm() { 2096 // Clear m and g, and return m to the extra list. 2097 // After the call to setg we can only call nosplit functions 2098 // with no pointer manipulation. 2099 mp := getg().m 2100 2101 // Return mp.curg to dead state. 2102 casgstatus(mp.curg, _Gsyscall, _Gdead) 2103 mp.curg.preemptStop = false 2104 atomic.Xadd(&sched.ngsys, +1) 2105 2106 // Block signals before unminit. 2107 // Unminit unregisters the signal handling stack (but needs g on some systems). 2108 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 
2109 // It's important not to try to handle a signal between those two steps. 2110 sigmask := mp.sigmask 2111 sigblock(false) 2112 unminit() 2113 2114 mnext := lockextra(true) 2115 extraMCount++ 2116 mp.schedlink.set(mnext) 2117 2118 setg(nil) 2119 2120 // Commit the release of mp. 2121 unlockextra(mp) 2122 2123 msigrestore(sigmask) 2124 } 2125 2126 // A helper function for EnsureDropM. 2127 func getm() uintptr { 2128 return uintptr(unsafe.Pointer(getg().m)) 2129 } 2130 2131 var extram uintptr 2132 var extraMCount uint32 // Protected by lockextra 2133 var extraMWaiters uint32 2134 2135 // lockextra locks the extra list and returns the list head. 2136 // The caller must unlock the list by storing a new list head 2137 // to extram. If nilokay is true, then lockextra will 2138 // return a nil list head if that's what it finds. If nilokay is false, 2139 // lockextra will keep waiting until the list head is no longer nil. 2140 //go:nosplit 2141 func lockextra(nilokay bool) *m { 2142 const locked = 1 2143 2144 incr := false 2145 for { 2146 old := atomic.Loaduintptr(&extram) 2147 if old == locked { 2148 osyield_no_g() 2149 continue 2150 } 2151 if old == 0 && !nilokay { 2152 if !incr { 2153 // Add 1 to the number of threads 2154 // waiting for an M. 2155 // This is cleared by newextram. 2156 atomic.Xadd(&extraMWaiters, 1) 2157 incr = true 2158 } 2159 usleep_no_g(1) 2160 continue 2161 } 2162 if atomic.Casuintptr(&extram, old, locked) { 2163 return (*m)(unsafe.Pointer(old)) 2164 } 2165 osyield_no_g() 2166 continue 2167 } 2168 } 2169 2170 //go:nosplit 2171 func unlockextra(mp *m) { 2172 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 2173 } 2174 2175 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 2176 // around exec'ing while creating/destroying threads. See issue #19546. 2177 var execLock rwmutex 2178 2179 // newmHandoff contains a list of m structures that need new OS threads. 2180 // This is used by newm in situations where newm itself can't safely 2181 // start an OS thread. 2182 var newmHandoff struct { 2183 lock mutex 2184 2185 // newm points to a list of M structures that need new OS 2186 // threads. The list is linked through m.schedlink. 2187 newm muintptr 2188 2189 // waiting indicates that wake needs to be notified when an m 2190 // is put on the list. 2191 waiting bool 2192 wake note 2193 2194 // haveTemplateThread indicates that the templateThread has 2195 // been started. This is not protected by lock. Use cas to set 2196 // to 1. 2197 haveTemplateThread uint32 2198 } 2199 2200 // Create a new m. It will start off with a call to fn, or else the scheduler. 2201 // fn needs to be static and not a heap allocated closure. 2202 // May run with m.p==nil, so write barriers are not allowed. 2203 // 2204 // id is optional pre-allocated m ID. Omit by passing -1. 2205 //go:nowritebarrierrec 2206 func newm(fn func(), _p_ *p, id int64) { 2207 mp := allocm(_p_, fn, id) 2208 mp.doesPark = (_p_ != nil) 2209 mp.nextp.set(_p_) 2210 mp.sigmask = initSigmask 2211 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { 2212 // We're on a locked M or a thread that may have been 2213 // started by C. The kernel state of this thread may 2214 // be strange (the user may have locked it for that 2215 // purpose). We don't want to clone that into another 2216 // thread. Instead, ask a known-good thread to create 2217 // the thread for us. 2218 // 2219 // This is disabled on Plan 9. See golang.org/issue/22227. 
2220 // 2221 // TODO: This may be unnecessary on Windows, which 2222 // doesn't model thread creation off fork. 2223 lock(&newmHandoff.lock) 2224 if newmHandoff.haveTemplateThread == 0 { 2225 throw("on a locked thread with no template thread") 2226 } 2227 mp.schedlink = newmHandoff.newm 2228 newmHandoff.newm.set(mp) 2229 if newmHandoff.waiting { 2230 newmHandoff.waiting = false 2231 notewakeup(&newmHandoff.wake) 2232 } 2233 unlock(&newmHandoff.lock) 2234 return 2235 } 2236 newm1(mp) 2237 } 2238 2239 func newm1(mp *m) { 2240 if iscgo { 2241 var ts cgothreadstart 2242 if _cgo_thread_start == nil { 2243 throw("_cgo_thread_start missing") 2244 } 2245 ts.g.set(mp.g0) 2246 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 2247 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart)) 2248 if msanenabled { 2249 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 2250 } 2251 if asanenabled { 2252 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 2253 } 2254 execLock.rlock() // Prevent process clone. 2255 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 2256 execLock.runlock() 2257 return 2258 } 2259 execLock.rlock() // Prevent process clone. 2260 newosproc(mp) 2261 execLock.runlock() 2262 } 2263 2264 // startTemplateThread starts the template thread if it is not already 2265 // running. 2266 // 2267 // The calling thread must itself be in a known-good state. 2268 func startTemplateThread() { 2269 if GOARCH == "wasm" { // no threads on wasm yet 2270 return 2271 } 2272 2273 // Disable preemption to guarantee that the template thread will be 2274 // created before a park once haveTemplateThread is set. 2275 mp := acquirem() 2276 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { 2277 releasem(mp) 2278 return 2279 } 2280 newm(templateThread, nil, -1) 2281 releasem(mp) 2282 } 2283 2284 // mFixupRace is used to temporarily borrow the race context from the 2285 // coordinating m during a syscall_runtime_doAllThreadsSyscall and 2286 // loan it out to each of the m's of the runtime so they can execute a 2287 // mFixup.fn in that context. 2288 var mFixupRace struct { 2289 lock mutex 2290 ctx uintptr 2291 } 2292 2293 // mDoFixup runs any outstanding fixup function for the running m. 2294 // Returns true if a fixup was outstanding and actually executed. 2295 // 2296 // Note: to avoid deadlocks, and the need for the fixup function 2297 // itself to be async safe, signals are blocked for the working m 2298 // while it holds the mFixup lock. (See golang.org/issue/44193) 2299 // 2300 //go:nosplit 2301 func mDoFixup() bool { 2302 _g_ := getg() 2303 if used := atomic.Load(&_g_.m.mFixup.used); used == 0 { 2304 return false 2305 } 2306 2307 // slow path - if fixup fn is used, block signals and lock. 2308 var sigmask sigset 2309 sigsave(&sigmask) 2310 sigblock(false) 2311 lock(&_g_.m.mFixup.lock) 2312 fn := _g_.m.mFixup.fn 2313 if fn != nil { 2314 if gcphase != _GCoff { 2315 // We can't have a write barrier in this 2316 // context since we may not have a P, but we 2317 // clear fn to signal that we've executed the 2318 // fixup. As long as fn is kept alive 2319 // elsewhere, technically we should have no 2320 // issues with the GC, but fn is likely 2321 // generated in a different package altogether 2322 // that may change independently. Just assert 2323 // the GC is off so this lack of write barrier 2324 // is more obviously safe. 
2325 throw("GC must be disabled to protect validity of fn value") 2326 } 2327 if _g_.racectx != 0 || !raceenabled { 2328 fn(false) 2329 } else { 2330 // temporarily acquire the context of the 2331 // originator of the 2332 // syscall_runtime_doAllThreadsSyscall and 2333 // block others from using it for the duration 2334 // of the fixup call. 2335 lock(&mFixupRace.lock) 2336 _g_.racectx = mFixupRace.ctx 2337 fn(false) 2338 _g_.racectx = 0 2339 unlock(&mFixupRace.lock) 2340 } 2341 *(*uintptr)(unsafe.Pointer(&_g_.m.mFixup.fn)) = 0 2342 atomic.Store(&_g_.m.mFixup.used, 0) 2343 } 2344 unlock(&_g_.m.mFixup.lock) 2345 msigrestore(sigmask) 2346 return fn != nil 2347 } 2348 2349 // mDoFixupAndOSYield is called when an m is unable to send a signal 2350 // because the allThreadsSyscall mechanism is in progress. That is, an 2351 // mPark() has been interrupted with this signal handler so we need to 2352 // ensure the fixup is executed from this context. 2353 //go:nosplit 2354 func mDoFixupAndOSYield() { 2355 mDoFixup() 2356 osyield() 2357 } 2358 2359 // templateThread is a thread in a known-good state that exists solely 2360 // to start new threads in known-good states when the calling thread 2361 // may not be in a good state. 2362 // 2363 // Many programs never need this, so templateThread is started lazily 2364 // when we first enter a state that might lead to running on a thread 2365 // in an unknown state. 2366 // 2367 // templateThread runs on an M without a P, so it must not have write 2368 // barriers. 2369 // 2370 //go:nowritebarrierrec 2371 func templateThread() { 2372 lock(&sched.lock) 2373 sched.nmsys++ 2374 checkdead() 2375 unlock(&sched.lock) 2376 2377 for { 2378 lock(&newmHandoff.lock) 2379 for newmHandoff.newm != 0 { 2380 newm := newmHandoff.newm.ptr() 2381 newmHandoff.newm = 0 2382 unlock(&newmHandoff.lock) 2383 for newm != nil { 2384 next := newm.schedlink.ptr() 2385 newm.schedlink = 0 2386 newm1(newm) 2387 newm = next 2388 } 2389 lock(&newmHandoff.lock) 2390 } 2391 newmHandoff.waiting = true 2392 noteclear(&newmHandoff.wake) 2393 unlock(&newmHandoff.lock) 2394 notesleep(&newmHandoff.wake) 2395 mDoFixup() 2396 } 2397 } 2398 2399 // Stops execution of the current m until new work is available. 2400 // Returns with acquired P. 2401 func stopm() { 2402 _g_ := getg() 2403 2404 if _g_.m.locks != 0 { 2405 throw("stopm holding locks") 2406 } 2407 if _g_.m.p != 0 { 2408 throw("stopm holding p") 2409 } 2410 if _g_.m.spinning { 2411 throw("stopm spinning") 2412 } 2413 2414 lock(&sched.lock) 2415 mput(_g_.m) 2416 unlock(&sched.lock) 2417 mPark() 2418 acquirep(_g_.m.nextp.ptr()) 2419 _g_.m.nextp = 0 2420 } 2421 2422 func mspinning() { 2423 // startm's caller incremented nmspinning. Set the new M's spinning. 2424 getg().m.spinning = true 2425 } 2426 2427 // Schedules some M to run the p (creates an M if necessary). 2428 // If p==nil, tries to get an idle P, if no idle P's does nothing. 2429 // May run with m.p==nil, so write barriers are not allowed. 2430 // If spinning is set, the caller has incremented nmspinning and startm will 2431 // either decrement nmspinning or set m.spinning in the newly started M. 2432 // 2433 // Callers passing a non-nil P must call from a non-preemptible context. See 2434 // comment on acquirem below. 2435 // 2436 // Must not have write barriers because this may be called without a P. 2437 //go:nowritebarrierrec 2438 func startm(_p_ *p, spinning bool) { 2439 // Disable preemption. 
2440 // 2441 // Every owned P must have an owner that will eventually stop it in the 2442 // event of a GC stop request. startm takes transient ownership of a P 2443 // (either from argument or pidleget below) and transfers ownership to 2444 // a started M, which will be responsible for performing the stop. 2445 // 2446 // Preemption must be disabled during this transient ownership, 2447 // otherwise the P this is running on may enter GC stop while still 2448 // holding the transient P, leaving that P in limbo and deadlocking the 2449 // STW. 2450 // 2451 // Callers passing a non-nil P must already be in non-preemptible 2452 // context, otherwise such preemption could occur on function entry to 2453 // startm. Callers passing a nil P may be preemptible, so we must 2454 // disable preemption before acquiring a P from pidleget below. 2455 mp := acquirem() 2456 lock(&sched.lock) 2457 if _p_ == nil { 2458 _p_ = pidleget() 2459 if _p_ == nil { 2460 unlock(&sched.lock) 2461 if spinning { 2462 // The caller incremented nmspinning, but there are no idle Ps, 2463 // so it's okay to just undo the increment and give up. 2464 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2465 throw("startm: negative nmspinning") 2466 } 2467 } 2468 releasem(mp) 2469 return 2470 } 2471 } 2472 nmp := mget() 2473 if nmp == nil { 2474 // No M is available, we must drop sched.lock and call newm. 2475 // However, we already own a P to assign to the M. 2476 // 2477 // Once sched.lock is released, another G (e.g., in a syscall), 2478 // could find no idle P while checkdead finds a runnable G but 2479 // no running M's because this new M hasn't started yet, thus 2480 // throwing in an apparent deadlock. 2481 // 2482 // Avoid this situation by pre-allocating the ID for the new M, 2483 // thus marking it as 'running' before we drop sched.lock. This 2484 // new M will eventually run the scheduler to execute any 2485 // queued G's. 2486 id := mReserveID() 2487 unlock(&sched.lock) 2488 2489 var fn func() 2490 if spinning { 2491 // The caller incremented nmspinning, so set m.spinning in the new M. 2492 fn = mspinning 2493 } 2494 newm(fn, _p_, id) 2495 // Ownership transfer of _p_ committed by start in newm. 2496 // Preemption is now safe. 2497 releasem(mp) 2498 return 2499 } 2500 unlock(&sched.lock) 2501 if nmp.spinning { 2502 throw("startm: m is spinning") 2503 } 2504 if nmp.nextp != 0 { 2505 throw("startm: m has p") 2506 } 2507 if spinning && !runqempty(_p_) { 2508 throw("startm: p has runnable gs") 2509 } 2510 // The caller incremented nmspinning, so set m.spinning in the new M. 2511 nmp.spinning = spinning 2512 nmp.nextp.set(_p_) 2513 notewakeup(&nmp.park) 2514 // Ownership transfer of _p_ committed by wakeup. Preemption is now 2515 // safe. 2516 releasem(mp) 2517 } 2518 2519 // Hands off P from syscall or locked M. 2520 // Always runs without a P, so write barriers are not allowed. 2521 //go:nowritebarrierrec 2522 func handoffp(_p_ *p) { 2523 // handoffp must start an M in any situation where 2524 // findrunnable would return a G to run on _p_. 
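// The checks below mirror findrunnable and are ordered roughly by cost: the
// run queues and GC mark work are tested without sched.lock, the spinning-M
// heuristic uses only atomics, and sched.lock is taken only for the gcwaiting,
// safe-point, global-queue, and last-running-P cases.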
2525 2526 // if it has local work, start it straight away 2527 if !runqempty(_p_) || sched.runqsize != 0 { 2528 startm(_p_, false) 2529 return 2530 } 2531 // if it has GC work, start it straight away 2532 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2533 startm(_p_, false) 2534 return 2535 } 2536 // no local work, check that there are no spinning/idle M's, 2537 // otherwise our help is not required 2538 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 2539 startm(_p_, true) 2540 return 2541 } 2542 lock(&sched.lock) 2543 if sched.gcwaiting != 0 { 2544 _p_.status = _Pgcstop 2545 sched.stopwait-- 2546 if sched.stopwait == 0 { 2547 notewakeup(&sched.stopnote) 2548 } 2549 unlock(&sched.lock) 2550 return 2551 } 2552 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 2553 sched.safePointFn(_p_) 2554 sched.safePointWait-- 2555 if sched.safePointWait == 0 { 2556 notewakeup(&sched.safePointNote) 2557 } 2558 } 2559 if sched.runqsize != 0 { 2560 unlock(&sched.lock) 2561 startm(_p_, false) 2562 return 2563 } 2564 // If this is the last running P and nobody is polling network, 2565 // need to wakeup another M to poll network. 2566 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 2567 unlock(&sched.lock) 2568 startm(_p_, false) 2569 return 2570 } 2571 2572 // The scheduler lock cannot be held when calling wakeNetPoller below 2573 // because wakeNetPoller may call wakep which may call startm. 2574 when := nobarrierWakeTime(_p_) 2575 pidleput(_p_) 2576 unlock(&sched.lock) 2577 2578 if when != 0 { 2579 wakeNetPoller(when) 2580 } 2581 } 2582 2583 // Tries to add one more P to execute G's. 2584 // Called when a G is made runnable (newproc, ready). 2585 func wakep() { 2586 if atomic.Load(&sched.npidle) == 0 { 2587 return 2588 } 2589 // be conservative about spinning threads 2590 if atomic.Load(&sched.nmspinning) != 0 || !atomic.Cas(&sched.nmspinning, 0, 1) { 2591 return 2592 } 2593 startm(nil, true) 2594 } 2595 2596 // Stops execution of the current m that is locked to a g until the g is runnable again. 2597 // Returns with acquired P. 2598 func stoplockedm() { 2599 _g_ := getg() 2600 2601 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 2602 throw("stoplockedm: inconsistent locking") 2603 } 2604 if _g_.m.p != 0 { 2605 // Schedule another M to run this p. 2606 _p_ := releasep() 2607 handoffp(_p_) 2608 } 2609 incidlelocked(1) 2610 // Wait until another thread schedules lockedg again. 2611 mPark() 2612 status := readgstatus(_g_.m.lockedg.ptr()) 2613 if status&^_Gscan != _Grunnable { 2614 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n") 2615 dumpgstatus(_g_.m.lockedg.ptr()) 2616 throw("stoplockedm: not runnable") 2617 } 2618 acquirep(_g_.m.nextp.ptr()) 2619 _g_.m.nextp = 0 2620 } 2621 2622 // Schedules the locked m to run the locked gp. 2623 // May run during STW, so write barriers are not allowed. 2624 //go:nowritebarrierrec 2625 func startlockedm(gp *g) { 2626 _g_ := getg() 2627 2628 mp := gp.lockedm.ptr() 2629 if mp == _g_.m { 2630 throw("startlockedm: locked to me") 2631 } 2632 if mp.nextp != 0 { 2633 throw("startlockedm: m has p") 2634 } 2635 // directly handoff current P to the locked m 2636 incidlelocked(-1) 2637 _p_ := releasep() 2638 mp.nextp.set(_p_) 2639 notewakeup(&mp.park) 2640 stopm() 2641 } 2642 2643 // Stops the current m for stopTheWorld. 2644 // Returns when the world is restarted. 
2645 func gcstopm() { 2646 _g_ := getg() 2647 2648 if sched.gcwaiting == 0 { 2649 throw("gcstopm: not waiting for gc") 2650 } 2651 if _g_.m.spinning { 2652 _g_.m.spinning = false 2653 // OK to just drop nmspinning here, 2654 // startTheWorld will unpark threads as necessary. 2655 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2656 throw("gcstopm: negative nmspinning") 2657 } 2658 } 2659 _p_ := releasep() 2660 lock(&sched.lock) 2661 _p_.status = _Pgcstop 2662 sched.stopwait-- 2663 if sched.stopwait == 0 { 2664 notewakeup(&sched.stopnote) 2665 } 2666 unlock(&sched.lock) 2667 stopm() 2668 } 2669 2670 // Schedules gp to run on the current M. 2671 // If inheritTime is true, gp inherits the remaining time in the 2672 // current time slice. Otherwise, it starts a new time slice. 2673 // Never returns. 2674 // 2675 // Write barriers are allowed because this is called immediately after 2676 // acquiring a P in several places. 2677 // 2678 //go:yeswritebarrierrec 2679 func execute(gp *g, inheritTime bool) { 2680 _g_ := getg() 2681 2682 // Assign gp.m before entering _Grunning so running Gs have an 2683 // M. 2684 _g_.m.curg = gp 2685 gp.m = _g_.m 2686 casgstatus(gp, _Grunnable, _Grunning) 2687 gp.waitsince = 0 2688 gp.preempt = false 2689 gp.stackguard0 = gp.stack.lo + _StackGuard 2690 if !inheritTime { 2691 _g_.m.p.ptr().schedtick++ 2692 } 2693 2694 // Check whether the profiler needs to be turned on or off. 2695 hz := sched.profilehz 2696 if _g_.m.profilehz != hz { 2697 setThreadCPUProfiler(hz) 2698 } 2699 2700 if trace.enabled { 2701 // GoSysExit has to happen when we have a P, but before GoStart. 2702 // So we emit it here. 2703 if gp.syscallsp != 0 && gp.sysblocktraced { 2704 traceGoSysExit(gp.sysexitticks) 2705 } 2706 traceGoStart() 2707 } 2708 2709 gogo(&gp.sched) 2710 } 2711 2712 // Finds a runnable goroutine to execute. 2713 // Tries to steal from other P's, get g from local or global queue, poll network. 2714 func findrunnable() (gp *g, inheritTime bool) { 2715 _g_ := getg() 2716 2717 // The conditions here and in handoffp must agree: if 2718 // findrunnable would return a G to run, handoffp must start 2719 // an M. 2720 2721 top: 2722 _p_ := _g_.m.p.ptr() 2723 if sched.gcwaiting != 0 { 2724 gcstopm() 2725 goto top 2726 } 2727 if _p_.runSafePointFn != 0 { 2728 runSafePointFn() 2729 } 2730 2731 now, pollUntil, _ := checkTimers(_p_, 0) 2732 2733 if fingwait && fingwake { 2734 if gp := wakefing(); gp != nil { 2735 ready(gp, 0, true) 2736 } 2737 } 2738 if *cgo_yield != nil { 2739 asmcgocall(*cgo_yield, nil) 2740 } 2741 2742 // local runq 2743 if gp, inheritTime := runqget(_p_); gp != nil { 2744 return gp, inheritTime 2745 } 2746 2747 // global runq 2748 if sched.runqsize != 0 { 2749 lock(&sched.lock) 2750 gp := globrunqget(_p_, 0) 2751 unlock(&sched.lock) 2752 if gp != nil { 2753 return gp, false 2754 } 2755 } 2756 2757 // Poll network. 2758 // This netpoll is only an optimization before we resort to stealing. 2759 // We can safely skip it if there are no waiters or a thread is blocked 2760 // in netpoll already. If there is any kind of logical race with that 2761 // blocked thread (e.g. it has already returned from netpoll, but does 2762 // not set lastpoll yet), this thread will do blocking netpoll below 2763 // anyway. 
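// netpoll's argument is a wait budget in nanoseconds: 0 (used here) polls
// without blocking, a negative value blocks until a descriptor becomes ready,
// and a positive value blocks for at most that long. The blocking form is only
// used further below, after this M has released its P.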
2764 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { 2765 if list := netpoll(0); !list.empty() { // non-blocking 2766 gp := list.pop() 2767 injectglist(&list) 2768 casgstatus(gp, _Gwaiting, _Grunnable) 2769 if trace.enabled { 2770 traceGoUnpark(gp, 0) 2771 } 2772 return gp, false 2773 } 2774 } 2775 2776 // Spinning Ms: steal work from other Ps. 2777 // 2778 // Limit the number of spinning Ms to half the number of busy Ps. 2779 // This is necessary to prevent excessive CPU consumption when 2780 // GOMAXPROCS>>1 but the program parallelism is low. 2781 procs := uint32(gomaxprocs) 2782 if _g_.m.spinning || 2*atomic.Load(&sched.nmspinning) < procs-atomic.Load(&sched.npidle) { 2783 if !_g_.m.spinning { 2784 _g_.m.spinning = true 2785 atomic.Xadd(&sched.nmspinning, 1) 2786 } 2787 2788 gp, inheritTime, tnow, w, newWork := stealWork(now) 2789 now = tnow 2790 if gp != nil { 2791 // Successfully stole. 2792 return gp, inheritTime 2793 } 2794 if newWork { 2795 // There may be new timer or GC work; restart to 2796 // discover. 2797 goto top 2798 } 2799 if w != 0 && (pollUntil == 0 || w < pollUntil) { 2800 // Earlier timer to wait for. 2801 pollUntil = w 2802 } 2803 } 2804 2805 // We have nothing to do. 2806 // 2807 // If we're in the GC mark phase, can safely scan and blacken objects, 2808 // and have work to do, run idle-time marking rather than give up the 2809 // P. 2810 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2811 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 2812 if node != nil { 2813 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2814 gp := node.gp.ptr() 2815 casgstatus(gp, _Gwaiting, _Grunnable) 2816 if trace.enabled { 2817 traceGoUnpark(gp, 0) 2818 } 2819 return gp, false 2820 } 2821 } 2822 2823 // wasm only: 2824 // If a callback returned and no other goroutine is awake, 2825 // then wake event handler goroutine which pauses execution 2826 // until a callback was triggered. 2827 gp, otherReady := beforeIdle(now, pollUntil) 2828 if gp != nil { 2829 casgstatus(gp, _Gwaiting, _Grunnable) 2830 if trace.enabled { 2831 traceGoUnpark(gp, 0) 2832 } 2833 return gp, false 2834 } 2835 if otherReady { 2836 goto top 2837 } 2838 2839 // Before we drop our P, make a snapshot of the allp slice, 2840 // which can change underfoot once we no longer block 2841 // safe-points. We don't need to snapshot the contents because 2842 // everything up to cap(allp) is immutable. 2843 allpSnapshot := allp 2844 // Also snapshot masks. Value changes are OK, but we can't allow 2845 // len to change out from under us. 2846 idlepMaskSnapshot := idlepMask 2847 timerpMaskSnapshot := timerpMask 2848 2849 // return P and block 2850 lock(&sched.lock) 2851 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2852 unlock(&sched.lock) 2853 goto top 2854 } 2855 if sched.runqsize != 0 { 2856 gp := globrunqget(_p_, 0) 2857 unlock(&sched.lock) 2858 return gp, false 2859 } 2860 if releasep() != _p_ { 2861 throw("findrunnable: wrong p") 2862 } 2863 pidleput(_p_) 2864 unlock(&sched.lock) 2865 2866 // Delicate dance: thread transitions from spinning to non-spinning 2867 // state, potentially concurrently with submission of new work. We must 2868 // drop nmspinning first and then check all sources again (with 2869 // #StoreLoad memory barrier in between). If we do it the other way 2870 // around, another thread can submit work after we've checked all 2871 // sources but before we drop nmspinning; as a result nobody will 2872 // unpark a thread to run the work. 
2873 // 2874 // This applies to the following sources of work: 2875 // 2876 // * Goroutines added to a per-P run queue. 2877 // * New/modified-earlier timers on a per-P timer heap. 2878 // * Idle-priority GC work (barring golang.org/issue/19112). 2879 // 2880 // If we discover new work below, we need to restore m.spinning as a signal 2881 // for resetspinning to unpark a new worker thread (because there can be more 2882 // than one starving goroutine). However, if after discovering new work 2883 // we also observe no idle Ps it is OK to skip unparking a new worker 2884 // thread: the system is fully loaded so no spinning threads are required. 2885 // Also see "Worker thread parking/unparking" comment at the top of the file. 2886 wasSpinning := _g_.m.spinning 2887 if _g_.m.spinning { 2888 _g_.m.spinning = false 2889 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2890 throw("findrunnable: negative nmspinning") 2891 } 2892 2893 // Note that for correctness, only the last M transitioning from 2894 // spinning to non-spinning must perform these rechecks to 2895 // ensure no missed work. We are performing it on every M that 2896 // transitions as a conservative change to monitor effects on 2897 // latency. See golang.org/issue/43997. 2898 2899 // Check all runqueues once again. 2900 _p_ = checkRunqsNoP(allpSnapshot, idlepMaskSnapshot) 2901 if _p_ != nil { 2902 acquirep(_p_) 2903 _g_.m.spinning = true 2904 atomic.Xadd(&sched.nmspinning, 1) 2905 goto top 2906 } 2907 2908 // Check for idle-priority GC work again. 2909 _p_, gp = checkIdleGCNoP() 2910 if _p_ != nil { 2911 acquirep(_p_) 2912 _g_.m.spinning = true 2913 atomic.Xadd(&sched.nmspinning, 1) 2914 2915 // Run the idle worker. 2916 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2917 casgstatus(gp, _Gwaiting, _Grunnable) 2918 if trace.enabled { 2919 traceGoUnpark(gp, 0) 2920 } 2921 return gp, false 2922 } 2923 2924 // Finally, check for timer creation or expiry concurrently with 2925 // transitioning from spinning to non-spinning. 2926 // 2927 // Note that we cannot use checkTimers here because it calls 2928 // adjusttimers which may need to allocate memory, and that isn't 2929 // allowed when we don't have an active P. 2930 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil) 2931 } 2932 2933 // Poll network until next timer. 2934 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2935 atomic.Store64(&sched.pollUntil, uint64(pollUntil)) 2936 if _g_.m.p != 0 { 2937 throw("findrunnable: netpoll with p") 2938 } 2939 if _g_.m.spinning { 2940 throw("findrunnable: netpoll with spinning") 2941 } 2942 delay := int64(-1) 2943 if pollUntil != 0 { 2944 if now == 0 { 2945 now = nanotime() 2946 } 2947 delay = pollUntil - now 2948 if delay < 0 { 2949 delay = 0 2950 } 2951 } 2952 if faketime != 0 { 2953 // When using fake time, just poll. 2954 delay = 0 2955 } 2956 list := netpoll(delay) // block until new work is available 2957 atomic.Store64(&sched.pollUntil, 0) 2958 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2959 if faketime != 0 && list.empty() { 2960 // Using fake time and nothing is ready; stop M. 2961 // When all M's stop, checkdead will call timejump.
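// stopm parks this M on the idle M list; when it is handed a P again we
// resume at the top of findrunnable.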
2962 stopm() 2963 goto top 2964 } 2965 lock(&sched.lock) 2966 _p_ = pidleget() 2967 unlock(&sched.lock) 2968 if _p_ == nil { 2969 injectglist(&list) 2970 } else { 2971 acquirep(_p_) 2972 if !list.empty() { 2973 gp := list.pop() 2974 injectglist(&list) 2975 casgstatus(gp, _Gwaiting, _Grunnable) 2976 if trace.enabled { 2977 traceGoUnpark(gp, 0) 2978 } 2979 return gp, false 2980 } 2981 if wasSpinning { 2982 _g_.m.spinning = true 2983 atomic.Xadd(&sched.nmspinning, 1) 2984 } 2985 goto top 2986 } 2987 } else if pollUntil != 0 && netpollinited() { 2988 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 2989 if pollerPollUntil == 0 || pollerPollUntil > pollUntil { 2990 netpollBreak() 2991 } 2992 } 2993 stopm() 2994 goto top 2995 } 2996 2997 // pollWork reports whether there is non-background work this P could 2998 // be doing. This is a fairly lightweight check to be used for 2999 // background work loops, like idle GC. It checks a subset of the 3000 // conditions checked by the actual scheduler. 3001 func pollWork() bool { 3002 if sched.runqsize != 0 { 3003 return true 3004 } 3005 p := getg().m.p.ptr() 3006 if !runqempty(p) { 3007 return true 3008 } 3009 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 3010 if list := netpoll(0); !list.empty() { 3011 injectglist(&list) 3012 return true 3013 } 3014 } 3015 return false 3016 } 3017 3018 // stealWork attempts to steal a runnable goroutine or timer from any P. 3019 // 3020 // If newWork is true, new work may have been readied. 3021 // 3022 // If now is not 0 it is the current time. stealWork returns the passed time or 3023 // the current time if now was passed as 0. 3024 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) { 3025 pp := getg().m.p.ptr() 3026 3027 ranTimer := false 3028 3029 const stealTries = 4 3030 for i := 0; i < stealTries; i++ { 3031 stealTimersOrRunNextG := i == stealTries-1 3032 3033 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { 3034 if sched.gcwaiting != 0 { 3035 // GC work may be available. 3036 return nil, false, now, pollUntil, true 3037 } 3038 p2 := allp[enum.position()] 3039 if pp == p2 { 3040 continue 3041 } 3042 3043 // Steal timers from p2. This call to checkTimers is the only place 3044 // where we might hold a lock on a different P's timers. We do this 3045 // once on the last pass before checking runnext because stealing 3046 // from the other P's runnext should be the last resort, so if there 3047 // are timers to steal do that first. 3048 // 3049 // We only check timers on one of the stealing iterations because 3050 // the time stored in now doesn't change in this loop and checking 3051 // the timers for each P more than once with the same value of now 3052 // is probably a waste of time. 3053 // 3054 // timerpMask tells us whether the P may have timers at all. If it 3055 // can't, no need to check at all. 3056 if stealTimersOrRunNextG && timerpMask.read(enum.position()) { 3057 tnow, w, ran := checkTimers(p2, now) 3058 now = tnow 3059 if w != 0 && (pollUntil == 0 || w < pollUntil) { 3060 pollUntil = w 3061 } 3062 if ran { 3063 // Running the timers may have 3064 // made an arbitrary number of G's 3065 // ready and added them to this P's 3066 // local run queue. That invalidates 3067 // the assumption of runqsteal 3068 // that it always has room to add 3069 // stolen G's. So check now if there 3070 // is a local G to run. 
3071 if gp, inheritTime := runqget(pp); gp != nil { 3072 return gp, inheritTime, now, pollUntil, ranTimer 3073 } 3074 ranTimer = true 3075 } 3076 } 3077 3078 // Don't bother to attempt to steal if p2 is idle. 3079 if !idlepMask.read(enum.position()) { 3080 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil { 3081 return gp, false, now, pollUntil, ranTimer 3082 } 3083 } 3084 } 3085 } 3086 3087 // No goroutines found to steal. Regardless, running a timer may have 3088 // made some goroutine ready that we missed. Indicate the next timer to 3089 // wait for. 3090 return nil, false, now, pollUntil, ranTimer 3091 } 3092 3093 // Check all Ps for a runnable G to steal. 3094 // 3095 // On entry we have no P. If a G is available to steal and a P is available, 3096 // the P is returned which the caller should acquire and attempt to steal the 3097 // work to. 3098 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p { 3099 for id, p2 := range allpSnapshot { 3100 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) { 3101 lock(&sched.lock) 3102 pp := pidleget() 3103 unlock(&sched.lock) 3104 if pp != nil { 3105 return pp 3106 } 3107 3108 // Can't get a P, don't bother checking remaining Ps. 3109 break 3110 } 3111 } 3112 3113 return nil 3114 } 3115 3116 // Check all Ps for a timer expiring sooner than pollUntil. 3117 // 3118 // Returns updated pollUntil value. 3119 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 { 3120 for id, p2 := range allpSnapshot { 3121 if timerpMaskSnapshot.read(uint32(id)) { 3122 w := nobarrierWakeTime(p2) 3123 if w != 0 && (pollUntil == 0 || w < pollUntil) { 3124 pollUntil = w 3125 } 3126 } 3127 } 3128 3129 return pollUntil 3130 } 3131 3132 // Check for idle-priority GC, without a P on entry. 3133 // 3134 // If some GC work, a P, and a worker G are all available, the P and G will be 3135 // returned. The returned P has not been wired yet. 3136 func checkIdleGCNoP() (*p, *g) { 3137 // N.B. Since we have no P, gcBlackenEnabled may change at any time; we 3138 // must check again after acquiring a P. 3139 if atomic.Load(&gcBlackenEnabled) == 0 { 3140 return nil, nil 3141 } 3142 if !gcMarkWorkAvailable(nil) { 3143 return nil, nil 3144 } 3145 3146 // Work is available; we can start an idle GC worker only if there is 3147 // an available P and available worker G. 3148 // 3149 // We can attempt to acquire these in either order, though both have 3150 // synchronization concerns (see below). Workers are almost always 3151 // available (see comment in findRunnableGCWorker for the one case 3152 // there may be none). Since we're slightly less likely to find a P, 3153 // check for that first. 3154 // 3155 // Synchronization: note that we must hold sched.lock until we are 3156 // committed to keeping it. Otherwise we cannot put the unnecessary P 3157 // back in sched.pidle without performing the full set of idle 3158 // transition checks. 3159 // 3160 // If we were to check gcBgMarkWorkerPool first, we must somehow handle 3161 // the assumption in gcControllerState.findRunnableGCWorker that an 3162 // empty gcBgMarkWorkerPool is only possible if gcMarkDone is running. 3163 lock(&sched.lock) 3164 pp := pidleget() 3165 if pp == nil { 3166 unlock(&sched.lock) 3167 return nil, nil 3168 } 3169 3170 // Now that we own a P, gcBlackenEnabled can't change (as it requires 3171 // STW). 
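// The re-check below is not redundant: the load at the top of this function
// ran before we held a P, so mark termination may have cleared
// gcBlackenEnabled in the meantime. Once we hold the P its value is stable.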
3172 if gcBlackenEnabled == 0 { 3173 pidleput(pp) 3174 unlock(&sched.lock) 3175 return nil, nil 3176 } 3177 3178 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 3179 if node == nil { 3180 pidleput(pp) 3181 unlock(&sched.lock) 3182 return nil, nil 3183 } 3184 3185 unlock(&sched.lock) 3186 3187 return pp, node.gp.ptr() 3188 } 3189 3190 // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't 3191 // going to wake up before the when argument; or it wakes an idle P to service 3192 // timers and the network poller if there isn't one already. 3193 func wakeNetPoller(when int64) { 3194 if atomic.Load64(&sched.lastpoll) == 0 { 3195 // In findrunnable we ensure that when polling the pollUntil 3196 // field is either zero or the time to which the current 3197 // poll is expected to run. This can have a spurious wakeup 3198 // but should never miss a wakeup. 3199 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 3200 if pollerPollUntil == 0 || pollerPollUntil > when { 3201 netpollBreak() 3202 } 3203 } else { 3204 // There are no threads in the network poller, try to get 3205 // one there so it can handle new timers. 3206 if GOOS != "plan9" { // Temporary workaround - see issue #42303. 3207 wakep() 3208 } 3209 } 3210 } 3211 3212 func resetspinning() { 3213 _g_ := getg() 3214 if !_g_.m.spinning { 3215 throw("resetspinning: not a spinning m") 3216 } 3217 _g_.m.spinning = false 3218 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 3219 if int32(nmspinning) < 0 { 3220 throw("findrunnable: negative nmspinning") 3221 } 3222 // M wakeup policy is deliberately somewhat conservative, so check if we 3223 // need to wakeup another P here. See "Worker thread parking/unparking" 3224 // comment at the top of the file for details. 3225 wakep() 3226 } 3227 3228 // injectglist adds each runnable G on the list to some run queue, 3229 // and clears glist. If there is no current P, they are added to the 3230 // global queue, and up to npidle M's are started to run them. 3231 // Otherwise, for each idle P, this adds a G to the global queue 3232 // and starts an M. Any remaining G's are added to the current P's 3233 // local run queue. 3234 // This may temporarily acquire sched.lock. 3235 // Can run concurrently with GC. 3236 func injectglist(glist *gList) { 3237 if glist.empty() { 3238 return 3239 } 3240 if trace.enabled { 3241 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { 3242 traceGoUnpark(gp, 0) 3243 } 3244 } 3245 3246 // Mark all the goroutines as runnable before we put them 3247 // on the run queues. 3248 head := glist.head.ptr() 3249 var tail *g 3250 qsize := 0 3251 for gp := head; gp != nil; gp = gp.schedlink.ptr() { 3252 tail = gp 3253 qsize++ 3254 casgstatus(gp, _Gwaiting, _Grunnable) 3255 } 3256 3257 // Turn the gList into a gQueue. 
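// The queue is then split: with no current P everything goes to the global
// run queue; otherwise one G per idle P is moved to the global queue (and an
// M started for each) and the remainder goes onto the current P's local run
// queue. For example, injecting 8 Gs with 3 idle Ps puts 3 Gs on the global
// queue, starts up to 3 Ms, and queues the other 5 locally.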
3258 var q gQueue 3259 q.head.set(head) 3260 q.tail.set(tail) 3261 *glist = gList{} 3262 3263 startIdle := func(n int) { 3264 for ; n != 0 && sched.npidle != 0; n-- { 3265 startm(nil, false) 3266 } 3267 } 3268 3269 pp := getg().m.p.ptr() 3270 if pp == nil { 3271 lock(&sched.lock) 3272 globrunqputbatch(&q, int32(qsize)) 3273 unlock(&sched.lock) 3274 startIdle(qsize) 3275 return 3276 } 3277 3278 npidle := int(atomic.Load(&sched.npidle)) 3279 var globq gQueue 3280 var n int 3281 for n = 0; n < npidle && !q.empty(); n++ { 3282 g := q.pop() 3283 globq.pushBack(g) 3284 } 3285 if n > 0 { 3286 lock(&sched.lock) 3287 globrunqputbatch(&globq, int32(n)) 3288 unlock(&sched.lock) 3289 startIdle(n) 3290 qsize -= n 3291 } 3292 3293 if !q.empty() { 3294 runqputbatch(pp, &q, qsize) 3295 } 3296 } 3297 3298 // One round of scheduler: find a runnable goroutine and execute it. 3299 // Never returns. 3300 func schedule() { 3301 _g_ := getg() 3302 3303 if _g_.m.locks != 0 { 3304 throw("schedule: holding locks") 3305 } 3306 3307 if _g_.m.lockedg != 0 { 3308 stoplockedm() 3309 execute(_g_.m.lockedg.ptr(), false) // Never returns. 3310 } 3311 3312 // We should not schedule away from a g that is executing a cgo call, 3313 // since the cgo call is using the m's g0 stack. 3314 if _g_.m.incgo { 3315 throw("schedule: in cgo") 3316 } 3317 3318 top: 3319 pp := _g_.m.p.ptr() 3320 pp.preempt = false 3321 3322 if sched.gcwaiting != 0 { 3323 gcstopm() 3324 goto top 3325 } 3326 if pp.runSafePointFn != 0 { 3327 runSafePointFn() 3328 } 3329 3330 // Sanity check: if we are spinning, the run queue should be empty. 3331 // Check this before calling checkTimers, as that might call 3332 // goready to put a ready goroutine on the local run queue. 3333 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { 3334 throw("schedule: spinning with local work") 3335 } 3336 3337 checkTimers(pp, 0) 3338 3339 var gp *g 3340 var inheritTime bool 3341 3342 // Normal goroutines will check for need to wakeP in ready, 3343 // but GCworkers and tracereaders will not, so the check must 3344 // be done here instead. 3345 tryWakeP := false 3346 if trace.enabled || trace.shutdown { 3347 gp = traceReader() 3348 if gp != nil { 3349 casgstatus(gp, _Gwaiting, _Grunnable) 3350 traceGoUnpark(gp, 0) 3351 tryWakeP = true 3352 } 3353 } 3354 if gp == nil && gcBlackenEnabled != 0 { 3355 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 3356 if gp != nil { 3357 tryWakeP = true 3358 } 3359 } 3360 if gp == nil { 3361 // Check the global runnable queue once in a while to ensure fairness. 3362 // Otherwise two goroutines can completely occupy the local runqueue 3363 // by constantly respawning each other. 3364 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 3365 lock(&sched.lock) 3366 gp = globrunqget(_g_.m.p.ptr(), 1) 3367 unlock(&sched.lock) 3368 } 3369 } 3370 if gp == nil { 3371 gp, inheritTime = runqget(_g_.m.p.ptr()) 3372 // We can see gp != nil here even if the M is spinning, 3373 // if checkTimers added a local goroutine via goready. 3374 } 3375 if gp == nil { 3376 gp, inheritTime = findrunnable() // blocks until work is available 3377 } 3378 3379 // This thread is going to run a goroutine and is not spinning anymore, 3380 // so if it was marked as spinning we need to reset it now and potentially 3381 // start a new spinning M. 3382 if _g_.m.spinning { 3383 resetspinning() 3384 } 3385 3386 if sched.disable.user && !schedEnabled(gp) { 3387 // Scheduling of this goroutine is disabled. 
Put it on 3388 // the list of pending runnable goroutines for when we 3389 // re-enable user scheduling and look again. 3390 lock(&sched.lock) 3391 if schedEnabled(gp) { 3392 // Something re-enabled scheduling while we 3393 // were acquiring the lock. 3394 unlock(&sched.lock) 3395 } else { 3396 sched.disable.runnable.pushBack(gp) 3397 sched.disable.n++ 3398 unlock(&sched.lock) 3399 goto top 3400 } 3401 } 3402 3403 // If about to schedule a not-normal goroutine (a GCworker or tracereader), 3404 // wake a P if there is one. 3405 if tryWakeP { 3406 wakep() 3407 } 3408 if gp.lockedm != 0 { 3409 // Hands off own p to the locked m, 3410 // then blocks waiting for a new p. 3411 startlockedm(gp) 3412 goto top 3413 } 3414 3415 execute(gp, inheritTime) 3416 } 3417 3418 // dropg removes the association between m and the current goroutine m->curg (gp for short). 3419 // Typically a caller sets gp's status away from Grunning and then 3420 // immediately calls dropg to finish the job. The caller is also responsible 3421 // for arranging that gp will be restarted using ready at an 3422 // appropriate time. After calling dropg and arranging for gp to be 3423 // readied later, the caller can do other work but eventually should 3424 // call schedule to restart the scheduling of goroutines on this m. 3425 func dropg() { 3426 _g_ := getg() 3427 3428 setMNoWB(&_g_.m.curg.m, nil) 3429 setGNoWB(&_g_.m.curg, nil) 3430 } 3431 3432 // checkTimers runs any timers for the P that are ready. 3433 // If now is not 0 it is the current time. 3434 // It returns the passed time or the current time if now was passed as 0. 3435 // and the time when the next timer should run or 0 if there is no next timer, 3436 // and reports whether it ran any timers. 3437 // If the time when the next timer should run is not 0, 3438 // it is always larger than the returned time. 3439 // We pass now in and out to avoid extra calls of nanotime. 3440 //go:yeswritebarrierrec 3441 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { 3442 // If it's not yet time for the first timer, or the first adjusted 3443 // timer, then there is nothing to do. 3444 next := int64(atomic.Load64(&pp.timer0When)) 3445 nextAdj := int64(atomic.Load64(&pp.timerModifiedEarliest)) 3446 if next == 0 || (nextAdj != 0 && nextAdj < next) { 3447 next = nextAdj 3448 } 3449 3450 if next == 0 { 3451 // No timers to run or adjust. 3452 return now, 0, false 3453 } 3454 3455 if now == 0 { 3456 now = nanotime() 3457 } 3458 if now < next { 3459 // Next timer is not ready to run, but keep going 3460 // if we would clear deleted timers. 3461 // This corresponds to the condition below where 3462 // we decide whether to call clearDeletedTimers. 3463 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) { 3464 return now, next, false 3465 } 3466 } 3467 3468 lock(&pp.timersLock) 3469 3470 if len(pp.timers) > 0 { 3471 adjusttimers(pp, now) 3472 for len(pp.timers) > 0 { 3473 // Note that runtimer may temporarily unlock 3474 // pp.timersLock. 3475 if tw := runtimer(pp, now); tw != 0 { 3476 if tw > 0 { 3477 pollUntil = tw 3478 } 3479 break 3480 } 3481 ran = true 3482 } 3483 } 3484 3485 // If this is the local P, and there are a lot of deleted timers, 3486 // clear them out. We only do this for the local P to reduce 3487 // lock contention on timersLock. 
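// "A lot" means more than a quarter of this P's timers are marked deleted,
// the same threshold used for the early return near the top of checkTimers.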
3488 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 { 3489 clearDeletedTimers(pp) 3490 } 3491 3492 unlock(&pp.timersLock) 3493 3494 return now, pollUntil, ran 3495 } 3496 3497 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 3498 unlock((*mutex)(lock)) 3499 return true 3500 } 3501 3502 // park continuation on g0. 3503 func park_m(gp *g) { 3504 _g_ := getg() 3505 3506 if trace.enabled { 3507 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 3508 } 3509 3510 casgstatus(gp, _Grunning, _Gwaiting) 3511 dropg() 3512 3513 if fn := _g_.m.waitunlockf; fn != nil { 3514 ok := fn(gp, _g_.m.waitlock) 3515 _g_.m.waitunlockf = nil 3516 _g_.m.waitlock = nil 3517 if !ok { 3518 if trace.enabled { 3519 traceGoUnpark(gp, 2) 3520 } 3521 casgstatus(gp, _Gwaiting, _Grunnable) 3522 execute(gp, true) // Schedule it back, never returns. 3523 } 3524 } 3525 schedule() 3526 } 3527 3528 func goschedImpl(gp *g) { 3529 status := readgstatus(gp) 3530 if status&^_Gscan != _Grunning { 3531 dumpgstatus(gp) 3532 throw("bad g status") 3533 } 3534 casgstatus(gp, _Grunning, _Grunnable) 3535 dropg() 3536 lock(&sched.lock) 3537 globrunqput(gp) 3538 unlock(&sched.lock) 3539 3540 schedule() 3541 } 3542 3543 // Gosched continuation on g0. 3544 func gosched_m(gp *g) { 3545 if trace.enabled { 3546 traceGoSched() 3547 } 3548 goschedImpl(gp) 3549 } 3550 3551 // goschedguarded is a forbidden-states-avoided version of gosched_m 3552 func goschedguarded_m(gp *g) { 3553 3554 if !canPreemptM(gp.m) { 3555 gogo(&gp.sched) // never return 3556 } 3557 3558 if trace.enabled { 3559 traceGoSched() 3560 } 3561 goschedImpl(gp) 3562 } 3563 3564 func gopreempt_m(gp *g) { 3565 if trace.enabled { 3566 traceGoPreempt() 3567 } 3568 goschedImpl(gp) 3569 } 3570 3571 // preemptPark parks gp and puts it in _Gpreempted. 3572 // 3573 //go:systemstack 3574 func preemptPark(gp *g) { 3575 if trace.enabled { 3576 traceGoPark(traceEvGoBlock, 0) 3577 } 3578 status := readgstatus(gp) 3579 if status&^_Gscan != _Grunning { 3580 dumpgstatus(gp) 3581 throw("bad g status") 3582 } 3583 gp.waitreason = waitReasonPreempted 3584 3585 if gp.asyncSafePoint { 3586 // Double-check that async preemption does not 3587 // happen in SPWRITE assembly functions. 3588 // isAsyncSafePoint must exclude this case. 3589 f := findfunc(gp.sched.pc) 3590 if !f.valid() { 3591 throw("preempt at unknown pc") 3592 } 3593 if f.flag&funcFlag_SPWRITE != 0 { 3594 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt") 3595 throw("preempt SPWRITE") 3596 } 3597 } 3598 3599 // Transition from _Grunning to _Gscan|_Gpreempted. We can't 3600 // be in _Grunning when we dropg because then we'd be running 3601 // without an M, but the moment we're in _Gpreempted, 3602 // something could claim this G before we've fully cleaned it 3603 // up. Hence, we set the scan bit to lock down further 3604 // transitions until we can dropg. 
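	// A hypothetical interleaving (for exposition only) of what a direct
	// _Grunning -> _Gpreempted transition would allow:
	//
	//	this M:                        another M (e.g. via suspendG or ready):
	//	casgstatus(gp, _Grunning,
	//	           _Gpreempted)
	//	                               sees _Gpreempted, claims gp, and starts
	//	                               scanning or readying it
	//	dropg()                        ...while gp is still wired to this M
	//
	// Holding the _Gscan bit across dropg, as done below, blocks that claim
	// until gp is fully detached.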
3605 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted) 3606 dropg() 3607 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted) 3608 schedule() 3609 } 3610 3611 // goyield is like Gosched, but it: 3612 // - emits a GoPreempt trace event instead of a GoSched trace event 3613 // - puts the current G on the runq of the current P instead of the globrunq 3614 func goyield() { 3615 checkTimeouts() 3616 mcall(goyield_m) 3617 } 3618 3619 func goyield_m(gp *g) { 3620 if trace.enabled { 3621 traceGoPreempt() 3622 } 3623 pp := gp.m.p.ptr() 3624 casgstatus(gp, _Grunning, _Grunnable) 3625 dropg() 3626 runqput(pp, gp, false) 3627 schedule() 3628 } 3629 3630 // Finishes execution of the current goroutine. 3631 func goexit1() { 3632 if raceenabled { 3633 racegoend() 3634 } 3635 if trace.enabled { 3636 traceGoEnd() 3637 } 3638 mcall(goexit0) 3639 } 3640 3641 // goexit continuation on g0. 3642 func goexit0(gp *g) { 3643 _g_ := getg() 3644 _p_ := _g_.m.p.ptr() 3645 3646 casgstatus(gp, _Grunning, _Gdead) 3647 gcController.addScannableStack(_p_, -int64(gp.stack.hi-gp.stack.lo)) 3648 if isSystemGoroutine(gp, false) { 3649 atomic.Xadd(&sched.ngsys, -1) 3650 } 3651 gp.m = nil 3652 locked := gp.lockedm != 0 3653 gp.lockedm = 0 3654 _g_.m.lockedg = 0 3655 gp.preemptStop = false 3656 gp.paniconfault = false 3657 gp._defer = nil // should be true already but just in case. 3658 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 3659 gp.writebuf = nil 3660 gp.waitreason = 0 3661 gp.param = nil 3662 gp.labels = nil 3663 gp.timer = nil 3664 3665 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { 3666 // Flush assist credit to the global pool. This gives 3667 // better information to pacing if the application is 3668 // rapidly creating an exiting goroutines. 3669 assistWorkPerByte := gcController.assistWorkPerByte.Load() 3670 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes)) 3671 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit) 3672 gp.gcAssistBytes = 0 3673 } 3674 3675 dropg() 3676 3677 if GOARCH == "wasm" { // no threads yet on wasm 3678 gfput(_p_, gp) 3679 schedule() // never returns 3680 } 3681 3682 if _g_.m.lockedInt != 0 { 3683 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") 3684 throw("internal lockOSThread error") 3685 } 3686 gfput(_p_, gp) 3687 if locked { 3688 // The goroutine may have locked this thread because 3689 // it put it in an unusual kernel state. Kill it 3690 // rather than returning it to the thread pool. 3691 3692 // Return to mstart, which will release the P and exit 3693 // the thread. 3694 if GOOS != "plan9" { // See golang.org/issue/22227. 3695 gogo(&_g_.m.g0.sched) 3696 } else { 3697 // Clear lockedExt on plan9 since we may end up re-using 3698 // this thread. 3699 _g_.m.lockedExt = 0 3700 } 3701 } 3702 schedule() 3703 } 3704 3705 // save updates getg().sched to refer to pc and sp so that a following 3706 // gogo will restore pc and sp. 3707 // 3708 // save must not have write barriers because invoking a write barrier 3709 // can clobber getg().sched. 3710 // 3711 //go:nosplit 3712 //go:nowritebarrierrec 3713 func save(pc, sp uintptr) { 3714 _g_ := getg() 3715 3716 if _g_ == _g_.m.g0 || _g_ == _g_.m.gsignal { 3717 // m.g0.sched is special and must describe the context 3718 // for exiting the thread. mstart1 writes to it directly. 3719 // m.gsignal.sched should not be used at all. 3720 // This check makes sure save calls do not accidentally 3721 // run in contexts where they'd write to system g's. 
3722 throw("save on system g not allowed") 3723 } 3724 3725 _g_.sched.pc = pc 3726 _g_.sched.sp = sp 3727 _g_.sched.lr = 0 3728 _g_.sched.ret = 0 3729 // We need to ensure ctxt is zero, but can't have a write 3730 // barrier here. However, it should always already be zero. 3731 // Assert that. 3732 if _g_.sched.ctxt != nil { 3733 badctxt() 3734 } 3735 } 3736 3737 // The goroutine g is about to enter a system call. 3738 // Record that it's not using the cpu anymore. 3739 // This is called only from the go syscall library and cgocall, 3740 // not from the low-level system calls used by the runtime. 3741 // 3742 // Entersyscall cannot split the stack: the save must 3743 // make g->sched refer to the caller's stack segment, because 3744 // entersyscall is going to return immediately after. 3745 // 3746 // Nothing entersyscall calls can split the stack either. 3747 // We cannot safely move the stack during an active call to syscall, 3748 // because we do not know which of the uintptr arguments are 3749 // really pointers (back into the stack). 3750 // In practice, this means that we make the fast path run through 3751 // entersyscall doing no-split things, and the slow path has to use systemstack 3752 // to run bigger things on the system stack. 3753 // 3754 // reentersyscall is the entry point used by cgo callbacks, where explicitly 3755 // saved SP and PC are restored. This is needed when exitsyscall will be called 3756 // from a function further up in the call stack than the parent, as g->syscallsp 3757 // must always point to a valid stack frame. entersyscall below is the normal 3758 // entry point for syscalls, which obtains the SP and PC from the caller. 3759 // 3760 // Syscall tracing: 3761 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 3762 // If the syscall does not block, that is it, we do not emit any other events. 3763 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 3764 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 3765 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 3766 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 3767 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 3768 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 3769 // and we wait for the increment before emitting traceGoSysExit. 3770 // Note that the increment is done even if tracing is not enabled, 3771 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 3772 // 3773 //go:nosplit 3774 func reentersyscall(pc, sp uintptr) { 3775 _g_ := getg() 3776 3777 // Disable preemption because during this function g is in Gsyscall status, 3778 // but can have inconsistent g->sched, do not let GC observe it. 3779 _g_.m.locks++ 3780 3781 // Entersyscall must not call any function that might split/grow the stack. 3782 // (See details in comment above.) 3783 // Catch calls that might, by replacing the stack guard with something that 3784 // will trip any stack check and leaving a flag to tell newstack to die. 3785 _g_.stackguard0 = stackPreempt 3786 _g_.throwsplit = true 3787 3788 // Leave SP around for GC and traceback. 
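	// For orientation, a syscall wrapper built on these hooks is bracketed
	// roughly as follows (illustrative sketch, not the actual syscall package
	// code; rawSyscall stands in for the raw kernel entry):
	//
	//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, errno uintptr) {
	//		entersyscall()
	//		r1, r2, errno = rawSyscall(trap, a1, a2, a3)
	//		exitsyscall()
	//		return
	//	}
	//
	// Everything between entersyscall and exitsyscall may run without a P
	// (sysmon can retake it), which is why the caller's PC/SP are saved here
	// first.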
3789 save(pc, sp) 3790 _g_.syscallsp = sp 3791 _g_.syscallpc = pc 3792 casgstatus(_g_, _Grunning, _Gsyscall) 3793 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3794 systemstack(func() { 3795 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3796 throw("entersyscall") 3797 }) 3798 } 3799 3800 if trace.enabled { 3801 systemstack(traceGoSysCall) 3802 // systemstack itself clobbers g.sched.{pc,sp} and we might 3803 // need them later when the G is genuinely blocked in a 3804 // syscall 3805 save(pc, sp) 3806 } 3807 3808 if atomic.Load(&sched.sysmonwait) != 0 { 3809 systemstack(entersyscall_sysmon) 3810 save(pc, sp) 3811 } 3812 3813 if _g_.m.p.ptr().runSafePointFn != 0 { 3814 // runSafePointFn may stack split if run on this stack 3815 systemstack(runSafePointFn) 3816 save(pc, sp) 3817 } 3818 3819 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3820 _g_.sysblocktraced = true 3821 pp := _g_.m.p.ptr() 3822 pp.m = 0 3823 _g_.m.oldp.set(pp) 3824 _g_.m.p = 0 3825 atomic.Store(&pp.status, _Psyscall) 3826 if sched.gcwaiting != 0 { 3827 systemstack(entersyscall_gcwait) 3828 save(pc, sp) 3829 } 3830 3831 _g_.m.locks-- 3832 } 3833 3834 // Standard syscall entry used by the go syscall library and normal cgo calls. 3835 // 3836 // This is exported via linkname to assembly in the syscall package. 3837 // 3838 //go:nosplit 3839 //go:linkname entersyscall 3840 func entersyscall() { 3841 reentersyscall(getcallerpc(), getcallersp()) 3842 } 3843 3844 func entersyscall_sysmon() { 3845 lock(&sched.lock) 3846 if atomic.Load(&sched.sysmonwait) != 0 { 3847 atomic.Store(&sched.sysmonwait, 0) 3848 notewakeup(&sched.sysmonnote) 3849 } 3850 unlock(&sched.lock) 3851 } 3852 3853 func entersyscall_gcwait() { 3854 _g_ := getg() 3855 _p_ := _g_.m.oldp.ptr() 3856 3857 lock(&sched.lock) 3858 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 3859 if trace.enabled { 3860 traceGoSysBlock(_p_) 3861 traceProcStop(_p_) 3862 } 3863 _p_.syscalltick++ 3864 if sched.stopwait--; sched.stopwait == 0 { 3865 notewakeup(&sched.stopnote) 3866 } 3867 } 3868 unlock(&sched.lock) 3869 } 3870 3871 // The same as entersyscall(), but with a hint that the syscall is blocking. 3872 //go:nosplit 3873 func entersyscallblock() { 3874 _g_ := getg() 3875 3876 _g_.m.locks++ // see comment in entersyscall 3877 _g_.throwsplit = true 3878 _g_.stackguard0 = stackPreempt // see comment in entersyscall 3879 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3880 _g_.sysblocktraced = true 3881 _g_.m.p.ptr().syscalltick++ 3882 3883 // Leave SP around for GC and traceback. 
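	// Unlike entersyscall, the P is handed off immediately via
	// entersyscallblock_handoff below rather than waiting for sysmon to
	// retake it. Schematically, a runtime-internal wait that is known to
	// block uses this bracket (illustrative sketch):
	//
	//	entersyscallblock()
	//	// ... block on an OS primitive (futex, semaphore), possibly forever ...
	//	exitsyscall()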
3884 pc := getcallerpc() 3885 sp := getcallersp() 3886 save(pc, sp) 3887 _g_.syscallsp = _g_.sched.sp 3888 _g_.syscallpc = _g_.sched.pc 3889 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3890 sp1 := sp 3891 sp2 := _g_.sched.sp 3892 sp3 := _g_.syscallsp 3893 systemstack(func() { 3894 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3895 throw("entersyscallblock") 3896 }) 3897 } 3898 casgstatus(_g_, _Grunning, _Gsyscall) 3899 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3900 systemstack(func() { 3901 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3902 throw("entersyscallblock") 3903 }) 3904 } 3905 3906 systemstack(entersyscallblock_handoff) 3907 3908 // Resave for traceback during blocked call. 3909 save(getcallerpc(), getcallersp()) 3910 3911 _g_.m.locks-- 3912 } 3913 3914 func entersyscallblock_handoff() { 3915 if trace.enabled { 3916 traceGoSysCall() 3917 traceGoSysBlock(getg().m.p.ptr()) 3918 } 3919 handoffp(releasep()) 3920 } 3921 3922 // The goroutine g exited its system call. 3923 // Arrange for it to run on a cpu again. 3924 // This is called only from the go syscall library, not 3925 // from the low-level system calls used by the runtime. 3926 // 3927 // Write barriers are not allowed because our P may have been stolen. 3928 // 3929 // This is exported via linkname to assembly in the syscall package. 3930 // 3931 //go:nosplit 3932 //go:nowritebarrierrec 3933 //go:linkname exitsyscall 3934 func exitsyscall() { 3935 _g_ := getg() 3936 3937 _g_.m.locks++ // see comment in entersyscall 3938 if getcallersp() > _g_.syscallsp { 3939 throw("exitsyscall: syscall frame is no longer valid") 3940 } 3941 3942 _g_.waitsince = 0 3943 oldp := _g_.m.oldp.ptr() 3944 _g_.m.oldp = 0 3945 if exitsyscallfast(oldp) { 3946 if trace.enabled { 3947 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3948 systemstack(traceGoStart) 3949 } 3950 } 3951 // There's a cpu for us, so we can run. 3952 _g_.m.p.ptr().syscalltick++ 3953 // We need to cas the status and scan before resuming... 3954 casgstatus(_g_, _Gsyscall, _Grunning) 3955 3956 // Garbage collector isn't running (since we are), 3957 // so okay to clear syscallsp. 3958 _g_.syscallsp = 0 3959 _g_.m.locks-- 3960 if _g_.preempt { 3961 // restore the preemption request in case we've cleared it in newstack 3962 _g_.stackguard0 = stackPreempt 3963 } else { 3964 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 3965 _g_.stackguard0 = _g_.stack.lo + _StackGuard 3966 } 3967 _g_.throwsplit = false 3968 3969 if sched.disable.user && !schedEnabled(_g_) { 3970 // Scheduling of this goroutine is disabled. 3971 Gosched() 3972 } 3973 3974 return 3975 } 3976 3977 _g_.sysexitticks = 0 3978 if trace.enabled { 3979 // Wait till traceGoSysBlock event is emitted. 3980 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3981 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 3982 osyield() 3983 } 3984 // We can't trace syscall exit right now because we don't have a P. 3985 // Tracing code can invoke write barriers that cannot run without a P. 3986 // So instead we remember the syscall exit time and emit the event 3987 // in execute when we have a P. 
3988 _g_.sysexitticks = cputicks() 3989 } 3990 3991 _g_.m.locks-- 3992 3993 // Call the scheduler. 3994 mcall(exitsyscall0) 3995 3996 // Scheduler returned, so we're allowed to run now. 3997 // Delete the syscallsp information that we left for 3998 // the garbage collector during the system call. 3999 // Must wait until now because until gosched returns 4000 // we don't know for sure that the garbage collector 4001 // is not running. 4002 _g_.syscallsp = 0 4003 _g_.m.p.ptr().syscalltick++ 4004 _g_.throwsplit = false 4005 } 4006 4007 //go:nosplit 4008 func exitsyscallfast(oldp *p) bool { 4009 _g_ := getg() 4010 4011 // Freezetheworld sets stopwait but does not retake P's. 4012 if sched.stopwait == freezeStopWait { 4013 return false 4014 } 4015 4016 // Try to re-acquire the last P. 4017 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { 4018 // There's a cpu for us, so we can run. 4019 wirep(oldp) 4020 exitsyscallfast_reacquired() 4021 return true 4022 } 4023 4024 // Try to get any other idle P. 4025 if sched.pidle != 0 { 4026 var ok bool 4027 systemstack(func() { 4028 ok = exitsyscallfast_pidle() 4029 if ok && trace.enabled { 4030 if oldp != nil { 4031 // Wait till traceGoSysBlock event is emitted. 4032 // This ensures consistency of the trace (the goroutine is started after it is blocked). 4033 for oldp.syscalltick == _g_.m.syscalltick { 4034 osyield() 4035 } 4036 } 4037 traceGoSysExit(0) 4038 } 4039 }) 4040 if ok { 4041 return true 4042 } 4043 } 4044 return false 4045 } 4046 4047 // exitsyscallfast_reacquired is the exitsyscall path on which this G 4048 // has successfully reacquired the P it was running on before the 4049 // syscall. 4050 // 4051 //go:nosplit 4052 func exitsyscallfast_reacquired() { 4053 _g_ := getg() 4054 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 4055 if trace.enabled { 4056 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 4057 // traceGoSysBlock for this syscall was already emitted, 4058 // but here we effectively retake the p from the new syscall running on the same p. 4059 systemstack(func() { 4060 // Denote blocking of the new syscall. 4061 traceGoSysBlock(_g_.m.p.ptr()) 4062 // Denote completion of the current syscall. 4063 traceGoSysExit(0) 4064 }) 4065 } 4066 _g_.m.p.ptr().syscalltick++ 4067 } 4068 } 4069 4070 func exitsyscallfast_pidle() bool { 4071 lock(&sched.lock) 4072 _p_ := pidleget() 4073 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 4074 atomic.Store(&sched.sysmonwait, 0) 4075 notewakeup(&sched.sysmonnote) 4076 } 4077 unlock(&sched.lock) 4078 if _p_ != nil { 4079 acquirep(_p_) 4080 return true 4081 } 4082 return false 4083 } 4084 4085 // exitsyscall slow path on g0. 4086 // Failed to acquire P, enqueue gp as runnable. 4087 // 4088 // Called via mcall, so gp is the calling g from this M. 4089 // 4090 //go:nowritebarrierrec 4091 func exitsyscall0(gp *g) { 4092 casgstatus(gp, _Gsyscall, _Grunnable) 4093 dropg() 4094 lock(&sched.lock) 4095 var _p_ *p 4096 if schedEnabled(gp) { 4097 _p_ = pidleget() 4098 } 4099 var locked bool 4100 if _p_ == nil { 4101 globrunqput(gp) 4102 4103 // Below, we stoplockedm if gp is locked. globrunqput releases 4104 // ownership of gp, so we must check if gp is locked prior to 4105 // committing the release by unlocking sched.lock, otherwise we 4106 // could race with another M transitioning gp from unlocked to 4107 // locked. 
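	// Hypothetical interleaving (for exposition only) of the race avoided by
	// reading lockedm before unlock(&sched.lock):
	//
	//	this M:                         another M:
	//	globrunqput(gp)
	//	unlock(&sched.lock)
	//	                                pops gp from the global queue, runs it;
	//	                                gp calls LockOSThread, so gp.lockedm
	//	                                now names that other M
	//	reads gp.lockedm != 0           // sees the new lock and wrongly
	//	                                // concludes it must stoplockedm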
4108 locked = gp.lockedm != 0 4109 } else if atomic.Load(&sched.sysmonwait) != 0 { 4110 atomic.Store(&sched.sysmonwait, 0) 4111 notewakeup(&sched.sysmonnote) 4112 } 4113 unlock(&sched.lock) 4114 if _p_ != nil { 4115 acquirep(_p_) 4116 execute(gp, false) // Never returns. 4117 } 4118 if locked { 4119 // Wait until another thread schedules gp and so m again. 4120 // 4121 // N.B. lockedm must be this M, as this g was running on this M 4122 // before entersyscall. 4123 stoplockedm() 4124 execute(gp, false) // Never returns. 4125 } 4126 stopm() 4127 schedule() // Never returns. 4128 } 4129 4130 // Called from syscall package before fork. 4131 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 4132 //go:nosplit 4133 func syscall_runtime_BeforeFork() { 4134 gp := getg().m.curg 4135 4136 // Block signals during a fork, so that the child does not run 4137 // a signal handler before exec if a signal is sent to the process 4138 // group. See issue #18600. 4139 gp.m.locks++ 4140 sigsave(&gp.m.sigmask) 4141 sigblock(false) 4142 4143 // This function is called before fork in syscall package. 4144 // Code between fork and exec must not allocate memory nor even try to grow stack. 4145 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 4146 // runtime_AfterFork will undo this in parent process, but not in child. 4147 gp.stackguard0 = stackFork 4148 } 4149 4150 // Called from syscall package after fork in parent. 4151 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 4152 //go:nosplit 4153 func syscall_runtime_AfterFork() { 4154 gp := getg().m.curg 4155 4156 // See the comments in beforefork. 4157 gp.stackguard0 = gp.stack.lo + _StackGuard 4158 4159 msigrestore(gp.m.sigmask) 4160 4161 gp.m.locks-- 4162 } 4163 4164 // inForkedChild is true while manipulating signals in the child process. 4165 // This is used to avoid calling libc functions in case we are using vfork. 4166 var inForkedChild bool 4167 4168 // Called from syscall package after fork in child. 4169 // It resets non-sigignored signals to the default handler, and 4170 // restores the signal mask in preparation for the exec. 4171 // 4172 // Because this might be called during a vfork, and therefore may be 4173 // temporarily sharing address space with the parent process, this must 4174 // not change any global variables or calling into C code that may do so. 4175 // 4176 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 4177 //go:nosplit 4178 //go:nowritebarrierrec 4179 func syscall_runtime_AfterForkInChild() { 4180 // It's OK to change the global variable inForkedChild here 4181 // because we are going to change it back. There is no race here, 4182 // because if we are sharing address space with the parent process, 4183 // then the parent process can not be running concurrently. 4184 inForkedChild = true 4185 4186 clearSignalHandlers() 4187 4188 // When we are the child we are the only thread running, 4189 // so we know that nothing else has changed gp.m.sigmask. 4190 msigrestore(getg().m.sigmask) 4191 4192 inForkedChild = false 4193 } 4194 4195 // pendingPreemptSignals is the number of preemption signals 4196 // that have been sent but not received. This is only used on Darwin. 4197 // For #41702. 4198 var pendingPreemptSignals uint32 4199 4200 // Called from syscall package before Exec. 4201 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 4202 func syscall_runtime_BeforeExec() { 4203 // Prevent thread creation during exec. 
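	// execLock is write-locked here and read-locked by newm around OS thread
	// creation, so no new thread can appear between BeforeExec and AfterExec.
	// Illustrative pairing in the syscall package (sketch only; rawExec stands
	// in for a raw execve, which returns only on failure):
	//
	//	syscall_runtime_BeforeExec()
	//	err := rawExec(argv0, argv, envv)
	//	syscall_runtime_AfterExec() // reached only if the exec failed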
4204 execLock.lock() 4205 4206 // On Darwin, wait for all pending preemption signals to 4207 // be received. See issue #41702. 4208 if GOOS == "darwin" || GOOS == "ios" { 4209 for int32(atomic.Load(&pendingPreemptSignals)) > 0 { 4210 osyield() 4211 } 4212 } 4213 } 4214 4215 // Called from syscall package after Exec. 4216 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 4217 func syscall_runtime_AfterExec() { 4218 execLock.unlock() 4219 } 4220 4221 // Allocate a new g, with a stack big enough for stacksize bytes. 4222 func malg(stacksize int32) *g { 4223 newg := new(g) 4224 if stacksize >= 0 { 4225 stacksize = round2(_StackSystem + stacksize) 4226 systemstack(func() { 4227 newg.stack = stackalloc(uint32(stacksize)) 4228 }) 4229 newg.stackguard0 = newg.stack.lo + _StackGuard 4230 newg.stackguard1 = ^uintptr(0) 4231 // Clear the bottom word of the stack. We record g 4232 // there on gsignal stack during VDSO on ARM and ARM64. 4233 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0 4234 } 4235 return newg 4236 } 4237 4238 // Create a new g running fn. 4239 // Put it on the queue of g's waiting to run. 4240 // The compiler turns a go statement into a call to this. 4241 func newproc(fn *funcval) { 4242 gp := getg() 4243 pc := getcallerpc() 4244 systemstack(func() { 4245 newg := newproc1(fn, gp, pc) 4246 4247 _p_ := getg().m.p.ptr() 4248 runqput(_p_, newg, true) 4249 4250 if mainStarted { 4251 wakep() 4252 } 4253 }) 4254 } 4255 4256 // Create a new g in state _Grunnable, starting at fn. callerpc is the 4257 // address of the go statement that created this. The caller is responsible 4258 // for adding the new g to the scheduler. 4259 func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g { 4260 _g_ := getg() 4261 4262 if fn == nil { 4263 _g_.m.throwing = -1 // do not dump full stacks 4264 throw("go of nil func value") 4265 } 4266 acquirem() // disable preemption because it can be holding p in a local var 4267 4268 _p_ := _g_.m.p.ptr() 4269 newg := gfget(_p_) 4270 if newg == nil { 4271 newg = malg(_StackMin) 4272 casgstatus(newg, _Gidle, _Gdead) 4273 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 4274 } 4275 if newg.stack.hi == 0 { 4276 throw("newproc1: newg missing stack") 4277 } 4278 4279 if readgstatus(newg) != _Gdead { 4280 throw("newproc1: new g is not Gdead") 4281 } 4282 4283 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame 4284 totalSize = alignUp(totalSize, sys.StackAlign) 4285 sp := newg.stack.hi - totalSize 4286 spArg := sp 4287 if usesLR { 4288 // caller's LR 4289 *(*uintptr)(unsafe.Pointer(sp)) = 0 4290 prepGoExitFrame(sp) 4291 spArg += sys.MinFrameSize 4292 } 4293 4294 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 4295 newg.sched.sp = sp 4296 newg.stktopsp = sp 4297 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 4298 newg.sched.g = guintptr(unsafe.Pointer(newg)) 4299 gostartcallfn(&newg.sched, fn) 4300 newg.gopc = callerpc 4301 newg.ancestors = saveAncestors(callergp) 4302 newg.startpc = fn.fn 4303 if _g_.m.curg != nil { 4304 newg.labels = _g_.m.curg.labels 4305 } 4306 if isSystemGoroutine(newg, false) { 4307 atomic.Xadd(&sched.ngsys, +1) 4308 } 4309 // Track initial transition? 
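	// For orientation (illustrative; the exact lowering is a compiler detail
	// that varies by version): because newproc takes only a *funcval, a
	// statement such as
	//
	//	go handle(conn)
	//
	// is compiled to an argument-free closure whose funcval is passed here,
	// roughly
	//
	//	tmp := func() { handle(conn) } // closure capturing conn
	//	newproc(<funcval of tmp>)      // schematic, not literal Go
	//
	// The g built above "returns" into goexit when fn finishes: sched.pc was
	// seeded with goexit and gostartcallfn pushed it as the return address.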
4310 newg.trackingSeq = uint8(fastrand()) 4311 if newg.trackingSeq%gTrackingPeriod == 0 { 4312 newg.tracking = true 4313 } 4314 casgstatus(newg, _Gdead, _Grunnable) 4315 gcController.addScannableStack(_p_, int64(newg.stack.hi-newg.stack.lo)) 4316 4317 if _p_.goidcache == _p_.goidcacheend { 4318 // Sched.goidgen is the last allocated id, 4319 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 4320 // At startup sched.goidgen=0, so main goroutine receives goid=1. 4321 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 4322 _p_.goidcache -= _GoidCacheBatch - 1 4323 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 4324 } 4325 newg.goid = int64(_p_.goidcache) 4326 _p_.goidcache++ 4327 if raceenabled { 4328 newg.racectx = racegostart(callerpc) 4329 } 4330 if trace.enabled { 4331 traceGoCreate(newg, newg.startpc) 4332 } 4333 releasem(_g_.m) 4334 4335 return newg 4336 } 4337 4338 // saveAncestors copies previous ancestors of the given caller g and 4339 // includes infor for the current caller into a new set of tracebacks for 4340 // a g being created. 4341 func saveAncestors(callergp *g) *[]ancestorInfo { 4342 // Copy all prior info, except for the root goroutine (goid 0). 4343 if debug.tracebackancestors <= 0 || callergp.goid == 0 { 4344 return nil 4345 } 4346 var callerAncestors []ancestorInfo 4347 if callergp.ancestors != nil { 4348 callerAncestors = *callergp.ancestors 4349 } 4350 n := int32(len(callerAncestors)) + 1 4351 if n > debug.tracebackancestors { 4352 n = debug.tracebackancestors 4353 } 4354 ancestors := make([]ancestorInfo, n) 4355 copy(ancestors[1:], callerAncestors) 4356 4357 var pcs [_TracebackMaxFrames]uintptr 4358 npcs := gcallers(callergp, 0, pcs[:]) 4359 ipcs := make([]uintptr, npcs) 4360 copy(ipcs, pcs[:]) 4361 ancestors[0] = ancestorInfo{ 4362 pcs: ipcs, 4363 goid: callergp.goid, 4364 gopc: callergp.gopc, 4365 } 4366 4367 ancestorsp := new([]ancestorInfo) 4368 *ancestorsp = ancestors 4369 return ancestorsp 4370 } 4371 4372 // Put on gfree list. 4373 // If local list is too long, transfer a batch to the global list. 4374 func gfput(_p_ *p, gp *g) { 4375 if readgstatus(gp) != _Gdead { 4376 throw("gfput: bad status (not Gdead)") 4377 } 4378 4379 stksize := gp.stack.hi - gp.stack.lo 4380 4381 if stksize != _FixedStack { 4382 // non-standard stack size - free it. 4383 stackfree(gp.stack) 4384 gp.stack.lo = 0 4385 gp.stack.hi = 0 4386 gp.stackguard0 = 0 4387 } 4388 4389 _p_.gFree.push(gp) 4390 _p_.gFree.n++ 4391 if _p_.gFree.n >= 64 { 4392 var ( 4393 inc int32 4394 stackQ gQueue 4395 noStackQ gQueue 4396 ) 4397 for _p_.gFree.n >= 32 { 4398 gp = _p_.gFree.pop() 4399 _p_.gFree.n-- 4400 if gp.stack.lo == 0 { 4401 noStackQ.push(gp) 4402 } else { 4403 stackQ.push(gp) 4404 } 4405 inc++ 4406 } 4407 lock(&sched.gFree.lock) 4408 sched.gFree.noStack.pushAll(noStackQ) 4409 sched.gFree.stack.pushAll(stackQ) 4410 sched.gFree.n += inc 4411 unlock(&sched.gFree.lock) 4412 } 4413 } 4414 4415 // Get from gfree list. 4416 // If local list is empty, grab a batch from global list. 4417 func gfget(_p_ *p) *g { 4418 retry: 4419 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { 4420 lock(&sched.gFree.lock) 4421 // Move a batch of free Gs to the P. 4422 for _p_.gFree.n < 32 { 4423 // Prefer Gs with stacks. 
4424 gp := sched.gFree.stack.pop() 4425 if gp == nil { 4426 gp = sched.gFree.noStack.pop() 4427 if gp == nil { 4428 break 4429 } 4430 } 4431 sched.gFree.n-- 4432 _p_.gFree.push(gp) 4433 _p_.gFree.n++ 4434 } 4435 unlock(&sched.gFree.lock) 4436 goto retry 4437 } 4438 gp := _p_.gFree.pop() 4439 if gp == nil { 4440 return nil 4441 } 4442 _p_.gFree.n-- 4443 if gp.stack.lo == 0 { 4444 // Stack was deallocated in gfput. Allocate a new one. 4445 systemstack(func() { 4446 gp.stack = stackalloc(_FixedStack) 4447 }) 4448 gp.stackguard0 = gp.stack.lo + _StackGuard 4449 } else { 4450 if raceenabled { 4451 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 4452 } 4453 if msanenabled { 4454 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 4455 } 4456 if asanenabled { 4457 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 4458 } 4459 } 4460 return gp 4461 } 4462 4463 // Purge all cached G's from gfree list to the global list. 4464 func gfpurge(_p_ *p) { 4465 var ( 4466 inc int32 4467 stackQ gQueue 4468 noStackQ gQueue 4469 ) 4470 for !_p_.gFree.empty() { 4471 gp := _p_.gFree.pop() 4472 _p_.gFree.n-- 4473 if gp.stack.lo == 0 { 4474 noStackQ.push(gp) 4475 } else { 4476 stackQ.push(gp) 4477 } 4478 inc++ 4479 } 4480 lock(&sched.gFree.lock) 4481 sched.gFree.noStack.pushAll(noStackQ) 4482 sched.gFree.stack.pushAll(stackQ) 4483 sched.gFree.n += inc 4484 unlock(&sched.gFree.lock) 4485 } 4486 4487 // Breakpoint executes a breakpoint trap. 4488 func Breakpoint() { 4489 breakpoint() 4490 } 4491 4492 // dolockOSThread is called by LockOSThread and lockOSThread below 4493 // after they modify m.locked. Do not allow preemption during this call, 4494 // or else the m might be different in this function than in the caller. 4495 //go:nosplit 4496 func dolockOSThread() { 4497 if GOARCH == "wasm" { 4498 return // no threads on wasm yet 4499 } 4500 _g_ := getg() 4501 _g_.m.lockedg.set(_g_) 4502 _g_.lockedm.set(_g_.m) 4503 } 4504 4505 //go:nosplit 4506 4507 // LockOSThread wires the calling goroutine to its current operating system thread. 4508 // The calling goroutine will always execute in that thread, 4509 // and no other goroutine will execute in it, 4510 // until the calling goroutine has made as many calls to 4511 // UnlockOSThread as to LockOSThread. 4512 // If the calling goroutine exits without unlocking the thread, 4513 // the thread will be terminated. 4514 // 4515 // All init functions are run on the startup thread. Calling LockOSThread 4516 // from an init function will cause the main function to be invoked on 4517 // that thread. 4518 // 4519 // A goroutine should call LockOSThread before calling OS services or 4520 // non-Go library functions that depend on per-thread state. 4521 func LockOSThread() { 4522 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { 4523 // If we need to start a new thread from the locked 4524 // thread, we need the template thread. Start it now 4525 // while we're in a known-good state. 4526 startTemplateThread() 4527 } 4528 _g_ := getg() 4529 _g_.m.lockedExt++ 4530 if _g_.m.lockedExt == 0 { 4531 _g_.m.lockedExt-- 4532 panic("LockOSThread nesting overflow") 4533 } 4534 dolockOSThread() 4535 } 4536 4537 //go:nosplit 4538 func lockOSThread() { 4539 getg().m.lockedInt++ 4540 dolockOSThread() 4541 } 4542 4543 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 4544 // after they update m->locked. 
Do not allow preemption during this call, 4545 // or else the m might be in different in this function than in the caller. 4546 //go:nosplit 4547 func dounlockOSThread() { 4548 if GOARCH == "wasm" { 4549 return // no threads on wasm yet 4550 } 4551 _g_ := getg() 4552 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { 4553 return 4554 } 4555 _g_.m.lockedg = 0 4556 _g_.lockedm = 0 4557 } 4558 4559 //go:nosplit 4560 4561 // UnlockOSThread undoes an earlier call to LockOSThread. 4562 // If this drops the number of active LockOSThread calls on the 4563 // calling goroutine to zero, it unwires the calling goroutine from 4564 // its fixed operating system thread. 4565 // If there are no active LockOSThread calls, this is a no-op. 4566 // 4567 // Before calling UnlockOSThread, the caller must ensure that the OS 4568 // thread is suitable for running other goroutines. If the caller made 4569 // any permanent changes to the state of the thread that would affect 4570 // other goroutines, it should not call this function and thus leave 4571 // the goroutine locked to the OS thread until the goroutine (and 4572 // hence the thread) exits. 4573 func UnlockOSThread() { 4574 _g_ := getg() 4575 if _g_.m.lockedExt == 0 { 4576 return 4577 } 4578 _g_.m.lockedExt-- 4579 dounlockOSThread() 4580 } 4581 4582 //go:nosplit 4583 func unlockOSThread() { 4584 _g_ := getg() 4585 if _g_.m.lockedInt == 0 { 4586 systemstack(badunlockosthread) 4587 } 4588 _g_.m.lockedInt-- 4589 dounlockOSThread() 4590 } 4591 4592 func badunlockosthread() { 4593 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 4594 } 4595 4596 func gcount() int32 { 4597 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) 4598 for _, _p_ := range allp { 4599 n -= _p_.gFree.n 4600 } 4601 4602 // All these variables can be changed concurrently, so the result can be inconsistent. 4603 // But at least the current goroutine is running. 4604 if n < 1 { 4605 n = 1 4606 } 4607 return n 4608 } 4609 4610 func mcount() int32 { 4611 return int32(sched.mnext - sched.nmfreed) 4612 } 4613 4614 var prof struct { 4615 signalLock uint32 4616 hz int32 4617 } 4618 4619 func _System() { _System() } 4620 func _ExternalCode() { _ExternalCode() } 4621 func _LostExternalCode() { _LostExternalCode() } 4622 func _GC() { _GC() } 4623 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 4624 func _VDSO() { _VDSO() } 4625 4626 // Called if we receive a SIGPROF signal. 4627 // Called by the signal handler, may run during STW. 4628 //go:nowritebarrierrec 4629 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 4630 if prof.hz == 0 { 4631 return 4632 } 4633 4634 // If mp.profilehz is 0, then profiling is not enabled for this thread. 4635 // We must check this to avoid a deadlock between setcpuprofilerate 4636 // and the call to cpuprof.add, below. 4637 if mp != nil && mp.profilehz == 0 { 4638 return 4639 } 4640 4641 // On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in 4642 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 4643 // the critical section, it creates a deadlock (when writing the sample). 4644 // As a workaround, create a counter of SIGPROFs while in critical section 4645 // to store the count, and pass it to sigprof.add() later when SIGPROF is 4646 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 
4647 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { 4648 if f := findfunc(pc); f.valid() { 4649 if hasPrefix(funcname(f), "runtime/internal/atomic") { 4650 cpuprof.lostAtomic++ 4651 return 4652 } 4653 } 4654 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 { 4655 // runtime/internal/atomic functions call into kernel 4656 // helpers on arm < 7. See 4657 // runtime/internal/atomic/sys_linux_arm.s. 4658 cpuprof.lostAtomic++ 4659 return 4660 } 4661 } 4662 4663 // Profiling runs concurrently with GC, so it must not allocate. 4664 // Set a trap in case the code does allocate. 4665 // Note that on windows, one thread takes profiles of all the 4666 // other threads, so mp is usually not getg().m. 4667 // In fact mp may not even be stopped. 4668 // See golang.org/issue/17165. 4669 getg().m.mallocing++ 4670 4671 var stk [maxCPUProfStack]uintptr 4672 flags := uint(_TraceJumpStack) 4673 n := 0 4674 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 4675 cgoOff := 0 4676 // Check cgoCallersUse to make sure that we are not 4677 // interrupting other code that is fiddling with 4678 // cgoCallers. We are running in a signal handler 4679 // with all signals blocked, so we don't have to worry 4680 // about any other code interrupting us. 4681 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 4682 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 4683 cgoOff++ 4684 } 4685 copy(stk[:], mp.cgoCallers[:cgoOff]) 4686 mp.cgoCallers[0] = 0 4687 } 4688 4689 // Collect Go stack that leads to the cgo call. 4690 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, flags) 4691 if n > 0 { 4692 n += cgoOff 4693 } 4694 } else { 4695 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|flags) 4696 } 4697 4698 if n <= 0 { 4699 // Normal traceback is impossible or has failed. 4700 // See if it falls into several common cases. 4701 n = 0 4702 if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 4703 // Libcall, i.e. runtime syscall on windows. 4704 // Collect Go stack that leads to the call. 4705 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, flags) 4706 } 4707 if n == 0 && mp != nil && mp.vdsoSP != 0 { 4708 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, flags) 4709 } 4710 if n == 0 { 4711 // If all of the above has failed, account it against abstract "System" or "GC". 4712 n = 2 4713 if inVDSOPage(pc) { 4714 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum 4715 } else if pc > firstmoduledata.etext { 4716 // "ExternalCode" is better than "etext". 4717 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum 4718 } 4719 stk[0] = pc 4720 if mp.preemptoff != "" { 4721 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum 4722 } else { 4723 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum 4724 } 4725 } 4726 } 4727 4728 if prof.hz != 0 { 4729 // Note: it can happen on Windows that we interrupted a system thread 4730 // with no g, so gp could nil. The other nil checks are done out of 4731 // caution, but not expected to be nil in practice. 
4732 var tagPtr *unsafe.Pointer 4733 if gp != nil && gp.m != nil && gp.m.curg != nil { 4734 tagPtr = &gp.m.curg.labels 4735 } 4736 cpuprof.add(tagPtr, stk[:n]) 4737 } 4738 getg().m.mallocing-- 4739 } 4740 4741 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 4742 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 4743 func setcpuprofilerate(hz int32) { 4744 // Force sane arguments. 4745 if hz < 0 { 4746 hz = 0 4747 } 4748 4749 // Disable preemption, otherwise we can be rescheduled to another thread 4750 // that has profiling enabled. 4751 _g_ := getg() 4752 _g_.m.locks++ 4753 4754 // Stop profiler on this thread so that it is safe to lock prof. 4755 // if a profiling signal came in while we had prof locked, 4756 // it would deadlock. 4757 setThreadCPUProfiler(0) 4758 4759 for !atomic.Cas(&prof.signalLock, 0, 1) { 4760 osyield() 4761 } 4762 if prof.hz != hz { 4763 setProcessCPUProfiler(hz) 4764 prof.hz = hz 4765 } 4766 atomic.Store(&prof.signalLock, 0) 4767 4768 lock(&sched.lock) 4769 sched.profilehz = hz 4770 unlock(&sched.lock) 4771 4772 if hz != 0 { 4773 setThreadCPUProfiler(hz) 4774 } 4775 4776 _g_.m.locks-- 4777 } 4778 4779 // init initializes pp, which may be a freshly allocated p or a 4780 // previously destroyed p, and transitions it to status _Pgcstop. 4781 func (pp *p) init(id int32) { 4782 pp.id = id 4783 pp.status = _Pgcstop 4784 pp.sudogcache = pp.sudogbuf[:0] 4785 pp.deferpool = pp.deferpoolbuf[:0] 4786 pp.wbBuf.reset() 4787 if pp.mcache == nil { 4788 if id == 0 { 4789 if mcache0 == nil { 4790 throw("missing mcache?") 4791 } 4792 // Use the bootstrap mcache0. Only one P will get 4793 // mcache0: the one with ID 0. 4794 pp.mcache = mcache0 4795 } else { 4796 pp.mcache = allocmcache() 4797 } 4798 } 4799 if raceenabled && pp.raceprocctx == 0 { 4800 if id == 0 { 4801 pp.raceprocctx = raceprocctx0 4802 raceprocctx0 = 0 // bootstrap 4803 } else { 4804 pp.raceprocctx = raceproccreate() 4805 } 4806 } 4807 lockInit(&pp.timersLock, lockRankTimers) 4808 4809 // This P may get timers when it starts running. Set the mask here 4810 // since the P may not go through pidleget (notably P 0 on startup). 4811 timerpMask.set(id) 4812 // Similarly, we may not go through pidleget before this P starts 4813 // running if it is P 0 on startup. 4814 idlepMask.clear(id) 4815 } 4816 4817 // destroy releases all of the resources associated with pp and 4818 // transitions it to status _Pdead. 4819 // 4820 // sched.lock must be held and the world must be stopped. 4821 func (pp *p) destroy() { 4822 assertLockHeld(&sched.lock) 4823 assertWorldStopped() 4824 4825 // Move all runnable goroutines to the global queue 4826 for pp.runqhead != pp.runqtail { 4827 // Pop from tail of local queue 4828 pp.runqtail-- 4829 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr() 4830 // Push onto head of global queue 4831 globrunqputhead(gp) 4832 } 4833 if pp.runnext != 0 { 4834 globrunqputhead(pp.runnext.ptr()) 4835 pp.runnext = 0 4836 } 4837 if len(pp.timers) > 0 { 4838 plocal := getg().m.p.ptr() 4839 // The world is stopped, but we acquire timersLock to 4840 // protect against sysmon calling timeSleepUntil. 4841 // This is the only case where we hold the timersLock of 4842 // more than one P, so there are no deadlock concerns. 
4843 lock(&plocal.timersLock) 4844 lock(&pp.timersLock) 4845 moveTimers(plocal, pp.timers) 4846 pp.timers = nil 4847 pp.numTimers = 0 4848 pp.deletedTimers = 0 4849 atomic.Store64(&pp.timer0When, 0) 4850 unlock(&pp.timersLock) 4851 unlock(&plocal.timersLock) 4852 } 4853 // Flush p's write barrier buffer. 4854 if gcphase != _GCoff { 4855 wbBufFlush1(pp) 4856 pp.gcw.dispose() 4857 } 4858 for i := range pp.sudogbuf { 4859 pp.sudogbuf[i] = nil 4860 } 4861 pp.sudogcache = pp.sudogbuf[:0] 4862 for j := range pp.deferpoolbuf { 4863 pp.deferpoolbuf[j] = nil 4864 } 4865 pp.deferpool = pp.deferpoolbuf[:0] 4866 systemstack(func() { 4867 for i := 0; i < pp.mspancache.len; i++ { 4868 // Safe to call since the world is stopped. 4869 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i])) 4870 } 4871 pp.mspancache.len = 0 4872 lock(&mheap_.lock) 4873 pp.pcache.flush(&mheap_.pages) 4874 unlock(&mheap_.lock) 4875 }) 4876 freemcache(pp.mcache) 4877 pp.mcache = nil 4878 gfpurge(pp) 4879 traceProcFree(pp) 4880 if raceenabled { 4881 if pp.timerRaceCtx != 0 { 4882 // The race detector code uses a callback to fetch 4883 // the proc context, so arrange for that callback 4884 // to see the right thing. 4885 // This hack only works because we are the only 4886 // thread running. 4887 mp := getg().m 4888 phold := mp.p.ptr() 4889 mp.p.set(pp) 4890 4891 racectxend(pp.timerRaceCtx) 4892 pp.timerRaceCtx = 0 4893 4894 mp.p.set(phold) 4895 } 4896 raceprocdestroy(pp.raceprocctx) 4897 pp.raceprocctx = 0 4898 } 4899 pp.gcAssistTime = 0 4900 pp.status = _Pdead 4901 } 4902 4903 // Change number of processors. 4904 // 4905 // sched.lock must be held, and the world must be stopped. 4906 // 4907 // gcworkbufs must not be being modified by either the GC or the write barrier 4908 // code, so the GC must not be running if the number of Ps actually changes. 4909 // 4910 // Returns list of Ps with local work, they need to be scheduled by the caller. 4911 func procresize(nprocs int32) *p { 4912 assertLockHeld(&sched.lock) 4913 assertWorldStopped() 4914 4915 old := gomaxprocs 4916 if old < 0 || nprocs <= 0 { 4917 throw("procresize: invalid arg") 4918 } 4919 if trace.enabled { 4920 traceGomaxprocs(nprocs) 4921 } 4922 4923 // update statistics 4924 now := nanotime() 4925 if sched.procresizetime != 0 { 4926 sched.totaltime += int64(old) * (now - sched.procresizetime) 4927 } 4928 sched.procresizetime = now 4929 4930 maskWords := (nprocs + 31) / 32 4931 4932 // Grow allp if necessary. 4933 if nprocs > int32(len(allp)) { 4934 // Synchronize with retake, which could be running 4935 // concurrently since it doesn't run on a P. 4936 lock(&allpLock) 4937 if nprocs <= int32(cap(allp)) { 4938 allp = allp[:nprocs] 4939 } else { 4940 nallp := make([]*p, nprocs) 4941 // Copy everything up to allp's cap so we 4942 // never lose old allocated Ps. 4943 copy(nallp, allp[:cap(allp)]) 4944 allp = nallp 4945 } 4946 4947 if maskWords <= int32(cap(idlepMask)) { 4948 idlepMask = idlepMask[:maskWords] 4949 timerpMask = timerpMask[:maskWords] 4950 } else { 4951 nidlepMask := make([]uint32, maskWords) 4952 // No need to copy beyond len, old Ps are irrelevant. 
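	// Illustrative sizing: with nprocs == 40, maskWords == (40+31)/32 == 2,
	// so idlepMask and timerpMask each need two uint32 words, one bit per P.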
4953 copy(nidlepMask, idlepMask) 4954 idlepMask = nidlepMask 4955 4956 ntimerpMask := make([]uint32, maskWords) 4957 copy(ntimerpMask, timerpMask) 4958 timerpMask = ntimerpMask 4959 } 4960 unlock(&allpLock) 4961 } 4962 4963 // initialize new P's 4964 for i := old; i < nprocs; i++ { 4965 pp := allp[i] 4966 if pp == nil { 4967 pp = new(p) 4968 } 4969 pp.init(i) 4970 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 4971 } 4972 4973 _g_ := getg() 4974 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4975 // continue to use the current P 4976 _g_.m.p.ptr().status = _Prunning 4977 _g_.m.p.ptr().mcache.prepareForSweep() 4978 } else { 4979 // release the current P and acquire allp[0]. 4980 // 4981 // We must do this before destroying our current P 4982 // because p.destroy itself has write barriers, so we 4983 // need to do that from a valid P. 4984 if _g_.m.p != 0 { 4985 if trace.enabled { 4986 // Pretend that we were descheduled 4987 // and then scheduled again to keep 4988 // the trace sane. 4989 traceGoSched() 4990 traceProcStop(_g_.m.p.ptr()) 4991 } 4992 _g_.m.p.ptr().m = 0 4993 } 4994 _g_.m.p = 0 4995 p := allp[0] 4996 p.m = 0 4997 p.status = _Pidle 4998 acquirep(p) 4999 if trace.enabled { 5000 traceGoStart() 5001 } 5002 } 5003 5004 // g.m.p is now set, so we no longer need mcache0 for bootstrapping. 5005 mcache0 = nil 5006 5007 // release resources from unused P's 5008 for i := nprocs; i < old; i++ { 5009 p := allp[i] 5010 p.destroy() 5011 // can't free P itself because it can be referenced by an M in syscall 5012 } 5013 5014 // Trim allp. 5015 if int32(len(allp)) != nprocs { 5016 lock(&allpLock) 5017 allp = allp[:nprocs] 5018 idlepMask = idlepMask[:maskWords] 5019 timerpMask = timerpMask[:maskWords] 5020 unlock(&allpLock) 5021 } 5022 5023 var runnablePs *p 5024 for i := nprocs - 1; i >= 0; i-- { 5025 p := allp[i] 5026 if _g_.m.p.ptr() == p { 5027 continue 5028 } 5029 p.status = _Pidle 5030 if runqempty(p) { 5031 pidleput(p) 5032 } else { 5033 p.m.set(mget()) 5034 p.link.set(runnablePs) 5035 runnablePs = p 5036 } 5037 } 5038 stealOrder.reset(uint32(nprocs)) 5039 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 5040 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 5041 return runnablePs 5042 } 5043 5044 // Associate p and the current m. 5045 // 5046 // This function is allowed to have write barriers even if the caller 5047 // isn't because it immediately acquires _p_. 5048 // 5049 //go:yeswritebarrierrec 5050 func acquirep(_p_ *p) { 5051 // Do the part that isn't allowed to have write barriers. 5052 wirep(_p_) 5053 5054 // Have p; write barriers now allowed. 5055 5056 // Perform deferred mcache flush before this P can allocate 5057 // from a potentially stale mcache. 5058 _p_.mcache.prepareForSweep() 5059 5060 if trace.enabled { 5061 traceProcStart() 5062 } 5063 } 5064 5065 // wirep is the first step of acquirep, which actually associates the 5066 // current M to _p_. This is broken out so we can disallow write 5067 // barriers for this part, since we don't yet have a P. 
5068 // 5069 //go:nowritebarrierrec 5070 //go:nosplit 5071 func wirep(_p_ *p) { 5072 _g_ := getg() 5073 5074 if _g_.m.p != 0 { 5075 throw("wirep: already in go") 5076 } 5077 if _p_.m != 0 || _p_.status != _Pidle { 5078 id := int64(0) 5079 if _p_.m != 0 { 5080 id = _p_.m.ptr().id 5081 } 5082 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 5083 throw("wirep: invalid p state") 5084 } 5085 _g_.m.p.set(_p_) 5086 _p_.m.set(_g_.m) 5087 _p_.status = _Prunning 5088 } 5089 5090 // Disassociate p and the current m. 5091 func releasep() *p { 5092 _g_ := getg() 5093 5094 if _g_.m.p == 0 { 5095 throw("releasep: invalid arg") 5096 } 5097 _p_ := _g_.m.p.ptr() 5098 if _p_.m.ptr() != _g_.m || _p_.status != _Prunning { 5099 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n") 5100 throw("releasep: invalid p state") 5101 } 5102 if trace.enabled { 5103 traceProcStop(_g_.m.p.ptr()) 5104 } 5105 _g_.m.p = 0 5106 _p_.m = 0 5107 _p_.status = _Pidle 5108 return _p_ 5109 } 5110 5111 func incidlelocked(v int32) { 5112 lock(&sched.lock) 5113 sched.nmidlelocked += v 5114 if v > 0 { 5115 checkdead() 5116 } 5117 unlock(&sched.lock) 5118 } 5119 5120 // Check for deadlock situation. 5121 // The check is based on number of running M's, if 0 -> deadlock. 5122 // sched.lock must be held. 5123 func checkdead() { 5124 assertLockHeld(&sched.lock) 5125 5126 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 5127 // there are no running goroutines. The calling program is 5128 // assumed to be running. 5129 if islibrary || isarchive { 5130 return 5131 } 5132 5133 // If we are dying because of a signal caught on an already idle thread, 5134 // freezetheworld will cause all running threads to block. 5135 // And runtime will essentially enter into deadlock state, 5136 // except that there is a thread that will call exit soon. 5137 if panicking > 0 { 5138 return 5139 } 5140 5141 // If we are not running under cgo, but we have an extra M then account 5142 // for it. (It is possible to have an extra M on Windows without cgo to 5143 // accommodate callbacks created by syscall.NewCallback. See issue #6751 5144 // for details.) 5145 var run0 int32 5146 if !iscgo && cgoHasExtraM { 5147 mp := lockextra(true) 5148 haveExtraM := extraMCount > 0 5149 unlockextra(mp) 5150 if haveExtraM { 5151 run0 = 1 5152 } 5153 } 5154 5155 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 5156 if run > run0 { 5157 return 5158 } 5159 if run < 0 { 5160 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 5161 throw("checkdead: inconsistent counts") 5162 } 5163 5164 grunning := 0 5165 forEachG(func(gp *g) { 5166 if isSystemGoroutine(gp, false) { 5167 return 5168 } 5169 s := readgstatus(gp) 5170 switch s &^ _Gscan { 5171 case _Gwaiting, 5172 _Gpreempted: 5173 grunning++ 5174 case _Grunnable, 5175 _Grunning, 5176 _Gsyscall: 5177 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 5178 throw("checkdead: runnable g") 5179 } 5180 }) 5181 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 5182 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 5183 throw("no goroutines (main called runtime.Goexit) - deadlock!") 5184 } 5185 5186 // Maybe jump time forward for playground. 
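	// For example (illustrative): on a faketime build such as the playground,
	//
	//	func main() { time.Sleep(time.Hour) }
	//
	// leaves no runnable goroutines, but instead of throwing we jump faketime
	// forward to the next timer below and wake an M to run it.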
5187 if faketime != 0 { 5188 when, _p_ := timeSleepUntil() 5189 if _p_ != nil { 5190 faketime = when 5191 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { 5192 if (*pp).ptr() == _p_ { 5193 *pp = _p_.link 5194 break 5195 } 5196 } 5197 mp := mget() 5198 if mp == nil { 5199 // There should always be a free M since 5200 // nothing is running. 5201 throw("checkdead: no m for timer") 5202 } 5203 mp.nextp.set(_p_) 5204 notewakeup(&mp.park) 5205 return 5206 } 5207 } 5208 5209 // There are no goroutines running, so we can look at the P's. 5210 for _, _p_ := range allp { 5211 if len(_p_.timers) > 0 { 5212 return 5213 } 5214 } 5215 5216 getg().m.throwing = -1 // do not dump full stacks 5217 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 5218 throw("all goroutines are asleep - deadlock!") 5219 } 5220 5221 // forcegcperiod is the maximum time in nanoseconds between garbage 5222 // collections. If we go this long without a garbage collection, one 5223 // is forced to run. 5224 // 5225 // This is a variable for testing purposes. It normally doesn't change. 5226 var forcegcperiod int64 = 2 * 60 * 1e9 5227 5228 // needSysmonWorkaround is true if the workaround for 5229 // golang.org/issue/42515 is needed on NetBSD. 5230 var needSysmonWorkaround bool = false 5231 5232 // Always runs without a P, so write barriers are not allowed. 5233 // 5234 //go:nowritebarrierrec 5235 func sysmon() { 5236 lock(&sched.lock) 5237 sched.nmsys++ 5238 checkdead() 5239 unlock(&sched.lock) 5240 5241 // For syscall_runtime_doAllThreadsSyscall, sysmon is 5242 // sufficiently up to participate in fixups. 5243 atomic.Store(&sched.sysmonStarting, 0) 5244 5245 lasttrace := int64(0) 5246 idle := 0 // how many cycles in succession we had not wokeup somebody 5247 delay := uint32(0) 5248 5249 for { 5250 if idle == 0 { // start with 20us sleep... 5251 delay = 20 5252 } else if idle > 50 { // start doubling the sleep after 1ms... 5253 delay *= 2 5254 } 5255 if delay > 10*1000 { // up to 10ms 5256 delay = 10 * 1000 5257 } 5258 usleep(delay) 5259 mDoFixup() 5260 5261 // sysmon should not enter deep sleep if schedtrace is enabled so that 5262 // it can print that information at the right time. 5263 // 5264 // It should also not enter deep sleep if there are any active P's so 5265 // that it can retake P's from syscalls, preempt long running G's, and 5266 // poll the network if all P's are busy for long stretches. 5267 // 5268 // It should wakeup from deep sleep if any P's become active either due 5269 // to exiting a syscall or waking up due to a timer expiring so that it 5270 // can resume performing those duties. If it wakes from a syscall it 5271 // resets idle and delay as a bet that since it had retaken a P from a 5272 // syscall before, it may need to do it again shortly after the 5273 // application starts work again. It does not reset idle when waking 5274 // from a timer to avoid adding system load to applications that spend 5275 // most of their time sleeping. 5276 now := nanotime() 5277 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 5278 lock(&sched.lock) 5279 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 5280 syscallWake := false 5281 next, _ := timeSleepUntil() 5282 if next > now { 5283 atomic.Store(&sched.sysmonwait, 1) 5284 unlock(&sched.lock) 5285 // Make wake-up period small enough 5286 // for the sampling to be correct. 
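	// With forcegcperiod == 2*60*1e9 ns, the cap computed below is 60 seconds;
	// if the next timer fires sooner than that, we sleep only until then.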
5287 sleep := forcegcperiod / 2 5288 if next-now < sleep { 5289 sleep = next - now 5290 } 5291 shouldRelax := sleep >= osRelaxMinNS 5292 if shouldRelax { 5293 osRelax(true) 5294 } 5295 syscallWake = notetsleep(&sched.sysmonnote, sleep) 5296 mDoFixup() 5297 if shouldRelax { 5298 osRelax(false) 5299 } 5300 lock(&sched.lock) 5301 atomic.Store(&sched.sysmonwait, 0) 5302 noteclear(&sched.sysmonnote) 5303 } 5304 if syscallWake { 5305 idle = 0 5306 delay = 20 5307 } 5308 } 5309 unlock(&sched.lock) 5310 } 5311 5312 lock(&sched.sysmonlock) 5313 // Update now in case we blocked on sysmonnote or spent a long time 5314 // blocked on schedlock or sysmonlock above. 5315 now = nanotime() 5316 5317 // trigger libc interceptors if needed 5318 if *cgo_yield != nil { 5319 asmcgocall(*cgo_yield, nil) 5320 } 5321 // poll network if not polled for more than 10ms 5322 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 5323 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 5324 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 5325 list := netpoll(0) // non-blocking - returns list of goroutines 5326 if !list.empty() { 5327 // Need to decrement number of idle locked M's 5328 // (pretending that one more is running) before injectglist. 5329 // Otherwise it can lead to the following situation: 5330 // injectglist grabs all P's but before it starts M's to run the P's, 5331 // another M returns from syscall, finishes running its G, 5332 // observes that there is no work to do and no other running M's 5333 // and reports deadlock. 5334 incidlelocked(-1) 5335 injectglist(&list) 5336 incidlelocked(1) 5337 } 5338 } 5339 mDoFixup() 5340 if GOOS == "netbsd" && needSysmonWorkaround { 5341 // netpoll is responsible for waiting for timer 5342 // expiration, so we typically don't have to worry 5343 // about starting an M to service timers. (Note that 5344 // sleep for timeSleepUntil above simply ensures sysmon 5345 // starts running again when that timer expiration may 5346 // cause Go code to run again). 5347 // 5348 // However, netbsd has a kernel bug that sometimes 5349 // misses netpollBreak wake-ups, which can lead to 5350 // unbounded delays servicing timers. If we detect this 5351 // overrun, then startm to get something to handle the 5352 // timer. 5353 // 5354 // See issue 42515 and 5355 // https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094. 5356 if next, _ := timeSleepUntil(); next < now { 5357 startm(nil, false) 5358 } 5359 } 5360 if atomic.Load(&scavenge.sysmonWake) != 0 { 5361 // Kick the scavenger awake if someone requested it. 5362 wakeScavenger() 5363 } 5364 // retake P's blocked in syscalls 5365 // and preempt long running G's 5366 if retake(now) != 0 { 5367 idle = 0 5368 } else { 5369 idle++ 5370 } 5371 // check if we need to force a GC 5372 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 5373 lock(&forcegc.lock) 5374 forcegc.idle = 0 5375 var list gList 5376 list.push(forcegc.g) 5377 injectglist(&list) 5378 unlock(&forcegc.lock) 5379 } 5380 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 5381 lasttrace = now 5382 schedtrace(debug.scheddetail > 0) 5383 } 5384 unlock(&sched.sysmonlock) 5385 } 5386 } 5387 5388 type sysmontick struct { 5389 schedtick uint32 5390 schedwhen int64 5391 syscalltick uint32 5392 syscallwhen int64 5393 } 5394 5395 // forcePreemptNS is the time slice given to a G before it is 5396 // preempted. 
5397 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 5398 5399 func retake(now int64) uint32 { 5400 n := 0 5401 // Prevent allp slice changes. This lock will be completely 5402 // uncontended unless we're already stopping the world. 5403 lock(&allpLock) 5404 // We can't use a range loop over allp because we may 5405 // temporarily drop the allpLock. Hence, we need to re-fetch 5406 // allp each time around the loop. 5407 for i := 0; i < len(allp); i++ { 5408 _p_ := allp[i] 5409 if _p_ == nil { 5410 // This can happen if procresize has grown 5411 // allp but not yet created new Ps. 5412 continue 5413 } 5414 pd := &_p_.sysmontick 5415 s := _p_.status 5416 sysretake := false 5417 if s == _Prunning || s == _Psyscall { 5418 // Preempt G if it's running for too long. 5419 t := int64(_p_.schedtick) 5420 if int64(pd.schedtick) != t { 5421 pd.schedtick = uint32(t) 5422 pd.schedwhen = now 5423 } else if pd.schedwhen+forcePreemptNS <= now { 5424 preemptone(_p_) 5425 // In case of syscall, preemptone() doesn't 5426 // work, because there is no M wired to P. 5427 sysretake = true 5428 } 5429 } 5430 if s == _Psyscall { 5431 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 5432 t := int64(_p_.syscalltick) 5433 if !sysretake && int64(pd.syscalltick) != t { 5434 pd.syscalltick = uint32(t) 5435 pd.syscallwhen = now 5436 continue 5437 } 5438 // On the one hand we don't want to retake Ps if there is no other work to do, 5439 // but on the other hand we want to retake them eventually 5440 // because they can prevent the sysmon thread from deep sleep. 5441 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 5442 continue 5443 } 5444 // Drop allpLock so we can take sched.lock. 5445 unlock(&allpLock) 5446 // Need to decrement number of idle locked M's 5447 // (pretending that one more is running) before the CAS. 5448 // Otherwise the M from which we retake can exit the syscall, 5449 // increment nmidle and report deadlock. 5450 incidlelocked(-1) 5451 if atomic.Cas(&_p_.status, s, _Pidle) { 5452 if trace.enabled { 5453 traceGoSysBlock(_p_) 5454 traceProcStop(_p_) 5455 } 5456 n++ 5457 _p_.syscalltick++ 5458 handoffp(_p_) 5459 } 5460 incidlelocked(1) 5461 lock(&allpLock) 5462 } 5463 } 5464 unlock(&allpLock) 5465 return uint32(n) 5466 } 5467 5468 // Tell all goroutines that they have been preempted and they should stop. 5469 // This function is purely best-effort. It can fail to inform a goroutine if a 5470 // processor just started running it. 5471 // No locks need to be held. 5472 // Returns true if preemption request was issued to at least one goroutine. 5473 func preemptall() bool { 5474 res := false 5475 for _, _p_ := range allp { 5476 if _p_.status != _Prunning { 5477 continue 5478 } 5479 if preemptone(_p_) { 5480 res = true 5481 } 5482 } 5483 return res 5484 } 5485 5486 // Tell the goroutine running on processor P to stop. 5487 // This function is purely best-effort. It can incorrectly fail to inform the 5488 // goroutine. It can inform the wrong goroutine. Even if it informs the 5489 // correct goroutine, that goroutine might ignore the request if it is 5490 // simultaneously executing newstack. 5491 // No lock needs to be held. 5492 // Returns true if preemption request was issued. 
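//
// (Unwinding the combined condition in retake's syscall case above: a P in
// _Psyscall is considered only after it has sat there for at least one sysmon
// tick, and is then retaken if any of the following holds:
//	1. its local run queue is non-empty, or
//	2. there are no spinning Ms and no idle Ps to absorb new work, or
//	3. it has been in the syscall for at least 10ms.
// Otherwise it is left alone for now, per the trade-off described in the code:
// an idle syscall P with nothing else to run is not worth retaking
// immediately, but it cannot be ignored forever because such Ps keep sysmon
// out of deep sleep. When a P is retaken it is CASed to _Pidle and handed to
// another M via handoffp.)
//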
5493 // The actual preemption will happen at some point in the future 5494 // and will be indicated by the gp->status no longer being 5495 // Grunning 5496 func preemptone(_p_ *p) bool { 5497 mp := _p_.m.ptr() 5498 if mp == nil || mp == getg().m { 5499 return false 5500 } 5501 gp := mp.curg 5502 if gp == nil || gp == mp.g0 { 5503 return false 5504 } 5505 5506 gp.preempt = true 5507 5508 // Every call in a goroutine checks for stack overflow by 5509 // comparing the current stack pointer to gp->stackguard0. 5510 // Setting gp->stackguard0 to StackPreempt folds 5511 // preemption into the normal stack overflow check. 5512 gp.stackguard0 = stackPreempt 5513 5514 // Request an async preemption of this P. 5515 if preemptMSupported && debug.asyncpreemptoff == 0 { 5516 _p_.preempt = true 5517 preemptM(mp) 5518 } 5519 5520 return true 5521 } 5522 5523 var starttime int64 5524 5525 func schedtrace(detailed bool) { 5526 now := nanotime() 5527 if starttime == 0 { 5528 starttime = now 5529 } 5530 5531 lock(&sched.lock) 5532 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 5533 if detailed { 5534 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 5535 } 5536 // We must be careful while reading data from P's, M's and G's. 5537 // Even if we hold schedlock, most data can be changed concurrently. 5538 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 5539 for i, _p_ := range allp { 5540 mp := _p_.m.ptr() 5541 h := atomic.Load(&_p_.runqhead) 5542 t := atomic.Load(&_p_.runqtail) 5543 if detailed { 5544 id := int64(-1) 5545 if mp != nil { 5546 id = mp.id 5547 } 5548 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n") 5549 } else { 5550 // In non-detailed mode format lengths of per-P run queues as: 5551 // [len1 len2 len3 len4] 5552 print(" ") 5553 if i == 0 { 5554 print("[") 5555 } 5556 print(t - h) 5557 if i == len(allp)-1 { 5558 print("]\n") 5559 } 5560 } 5561 } 5562 5563 if !detailed { 5564 unlock(&sched.lock) 5565 return 5566 } 5567 5568 for mp := allm; mp != nil; mp = mp.alllink { 5569 _p_ := mp.p.ptr() 5570 gp := mp.curg 5571 lockedg := mp.lockedg.ptr() 5572 id1 := int32(-1) 5573 if _p_ != nil { 5574 id1 = _p_.id 5575 } 5576 id2 := int64(-1) 5577 if gp != nil { 5578 id2 = gp.goid 5579 } 5580 id3 := int64(-1) 5581 if lockedg != nil { 5582 id3 = lockedg.goid 5583 } 5584 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 5585 } 5586 5587 forEachG(func(gp *g) { 5588 mp := gp.m 5589 lockedm := gp.lockedm.ptr() 5590 id1 := int64(-1) 5591 if mp != nil { 5592 id1 = mp.id 5593 } 5594 id2 := int64(-1) 5595 if lockedm != nil { 5596 id2 = lockedm.id 5597 } 5598 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") 5599 }) 5600 unlock(&sched.lock) 5601 } 5602 5603 // schedEnableUser enables or disables the scheduling of user 5604 // goroutines. 
5605 // 5606 // This does not stop already running user goroutines, so the caller 5607 // should first stop the world when disabling user goroutines. 5608 func schedEnableUser(enable bool) { 5609 lock(&sched.lock) 5610 if sched.disable.user == !enable { 5611 unlock(&sched.lock) 5612 return 5613 } 5614 sched.disable.user = !enable 5615 if enable { 5616 n := sched.disable.n 5617 sched.disable.n = 0 5618 globrunqputbatch(&sched.disable.runnable, n) 5619 unlock(&sched.lock) 5620 for ; n != 0 && sched.npidle != 0; n-- { 5621 startm(nil, false) 5622 } 5623 } else { 5624 unlock(&sched.lock) 5625 } 5626 } 5627 5628 // schedEnabled reports whether gp should be scheduled. It returns 5629 // false is scheduling of gp is disabled. 5630 // 5631 // sched.lock must be held. 5632 func schedEnabled(gp *g) bool { 5633 assertLockHeld(&sched.lock) 5634 5635 if sched.disable.user { 5636 return isSystemGoroutine(gp, true) 5637 } 5638 return true 5639 } 5640 5641 // Put mp on midle list. 5642 // sched.lock must be held. 5643 // May run during STW, so write barriers are not allowed. 5644 //go:nowritebarrierrec 5645 func mput(mp *m) { 5646 assertLockHeld(&sched.lock) 5647 5648 mp.schedlink = sched.midle 5649 sched.midle.set(mp) 5650 sched.nmidle++ 5651 checkdead() 5652 } 5653 5654 // Try to get an m from midle list. 5655 // sched.lock must be held. 5656 // May run during STW, so write barriers are not allowed. 5657 //go:nowritebarrierrec 5658 func mget() *m { 5659 assertLockHeld(&sched.lock) 5660 5661 mp := sched.midle.ptr() 5662 if mp != nil { 5663 sched.midle = mp.schedlink 5664 sched.nmidle-- 5665 } 5666 return mp 5667 } 5668 5669 // Put gp on the global runnable queue. 5670 // sched.lock must be held. 5671 // May run during STW, so write barriers are not allowed. 5672 //go:nowritebarrierrec 5673 func globrunqput(gp *g) { 5674 assertLockHeld(&sched.lock) 5675 5676 sched.runq.pushBack(gp) 5677 sched.runqsize++ 5678 } 5679 5680 // Put gp at the head of the global runnable queue. 5681 // sched.lock must be held. 5682 // May run during STW, so write barriers are not allowed. 5683 //go:nowritebarrierrec 5684 func globrunqputhead(gp *g) { 5685 assertLockHeld(&sched.lock) 5686 5687 sched.runq.push(gp) 5688 sched.runqsize++ 5689 } 5690 5691 // Put a batch of runnable goroutines on the global runnable queue. 5692 // This clears *batch. 5693 // sched.lock must be held. 5694 // May run during STW, so write barriers are not allowed. 5695 //go:nowritebarrierrec 5696 func globrunqputbatch(batch *gQueue, n int32) { 5697 assertLockHeld(&sched.lock) 5698 5699 sched.runq.pushBackAll(*batch) 5700 sched.runqsize += n 5701 *batch = gQueue{} 5702 } 5703 5704 // Try get a batch of G's from the global runnable queue. 5705 // sched.lock must be held. 5706 func globrunqget(_p_ *p, max int32) *g { 5707 assertLockHeld(&sched.lock) 5708 5709 if sched.runqsize == 0 { 5710 return nil 5711 } 5712 5713 n := sched.runqsize/gomaxprocs + 1 5714 if n > sched.runqsize { 5715 n = sched.runqsize 5716 } 5717 if max > 0 && n > max { 5718 n = max 5719 } 5720 if n > int32(len(_p_.runq))/2 { 5721 n = int32(len(_p_.runq)) / 2 5722 } 5723 5724 sched.runqsize -= n 5725 5726 gp := sched.runq.pop() 5727 n-- 5728 for ; n > 0; n-- { 5729 gp1 := sched.runq.pop() 5730 runqput(_p_, gp1, false) 5731 } 5732 return gp 5733 } 5734 5735 // pMask is an atomic bitstring with one bit per P. 5736 type pMask []uint32 5737 5738 // read returns true if P id's bit is set. 
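//
// read below, and set and clear after it, all use the same word/bit split:
// word index id/32, bit mask 1<<(id%32). The same shape can be reproduced
// outside the runtime on top of sync/atomic; a minimal, self-contained sketch
// (the bitmask type and its methods are illustrative, not runtime API, and
// CAS loops stand in for the runtime-internal atomic.Or and atomic.And):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	// bitmask has one bit per id, packed into uint32 words, like pMask.
//	type bitmask []uint32
//
//	func (m bitmask) read(id uint32) bool {
//		return atomic.LoadUint32(&m[id/32])&(1<<(id%32)) != 0
//	}
//
//	func (m bitmask) set(id uint32) {
//		word, bit := id/32, uint32(1)<<(id%32)
//		for {
//			old := atomic.LoadUint32(&m[word])
//			if old&bit != 0 || atomic.CompareAndSwapUint32(&m[word], old, old|bit) {
//				return
//			}
//		}
//	}
//
//	func (m bitmask) clear(id uint32) {
//		word, bit := id/32, uint32(1)<<(id%32)
//		for {
//			old := atomic.LoadUint32(&m[word])
//			if old&bit == 0 || atomic.CompareAndSwapUint32(&m[word], old, old&^bit) {
//				return
//			}
//		}
//	}
//
//	func main() {
//		m := make(bitmask, 2) // room for ids 0..63
//		m.set(37)
//		fmt.Println(m.read(37), m.read(38)) // true false
//	}
//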
5739 func (p pMask) read(id uint32) bool { 5740 word := id / 32 5741 mask := uint32(1) << (id % 32) 5742 return (atomic.Load(&p[word]) & mask) != 0 5743 } 5744 5745 // set sets P id's bit. 5746 func (p pMask) set(id int32) { 5747 word := id / 32 5748 mask := uint32(1) << (id % 32) 5749 atomic.Or(&p[word], mask) 5750 } 5751 5752 // clear clears P id's bit. 5753 func (p pMask) clear(id int32) { 5754 word := id / 32 5755 mask := uint32(1) << (id % 32) 5756 atomic.And(&p[word], ^mask) 5757 } 5758 5759 // updateTimerPMask clears pp's timer mask if it has no timers on its heap. 5760 // 5761 // Ideally, the timer mask would be kept immediately consistent on any timer 5762 // operations. Unfortunately, updating a shared global data structure in the 5763 // timer hot path adds too much overhead in applications frequently switching 5764 // between no timers and some timers. 5765 // 5766 // As a compromise, the timer mask is updated only on pidleget / pidleput. A 5767 // running P (returned by pidleget) may add a timer at any time, so its mask 5768 // must be set. An idle P (passed to pidleput) cannot add new timers while 5769 // idle, so if it has no timers at that time, its mask may be cleared. 5770 // 5771 // Thus, we get the following effects on timer-stealing in findrunnable: 5772 // 5773 // * Idle Ps with no timers when they go idle are never checked in findrunnable 5774 // (for work- or timer-stealing; this is the ideal case). 5775 // * Running Ps must always be checked. 5776 // * Idle Ps whose timers are stolen must continue to be checked until they run 5777 // again, even after timer expiration. 5778 // 5779 // When the P starts running again, the mask should be set, as a timer may be 5780 // added at any time. 5781 // 5782 // TODO(prattmic): Additional targeted updates may improve the above cases. 5783 // e.g., updating the mask when stealing a timer. 5784 func updateTimerPMask(pp *p) { 5785 if atomic.Load(&pp.numTimers) > 0 { 5786 return 5787 } 5788 5789 // Looks like there are no timers, however another P may transiently 5790 // decrement numTimers when handling a timerModified timer in 5791 // checkTimers. We must take timersLock to serialize with these changes. 5792 lock(&pp.timersLock) 5793 if atomic.Load(&pp.numTimers) == 0 { 5794 timerpMask.clear(pp.id) 5795 } 5796 unlock(&pp.timersLock) 5797 } 5798 5799 // pidleput puts p to on the _Pidle list. 5800 // 5801 // This releases ownership of p. Once sched.lock is released it is no longer 5802 // safe to use p. 5803 // 5804 // sched.lock must be held. 5805 // 5806 // May run during STW, so write barriers are not allowed. 5807 //go:nowritebarrierrec 5808 func pidleput(_p_ *p) { 5809 assertLockHeld(&sched.lock) 5810 5811 if !runqempty(_p_) { 5812 throw("pidleput: P has non-empty run queue") 5813 } 5814 updateTimerPMask(_p_) // clear if there are no timers. 5815 idlepMask.set(_p_.id) 5816 _p_.link = sched.pidle 5817 sched.pidle.set(_p_) 5818 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 5819 } 5820 5821 // pidleget tries to get a p from the _Pidle list, acquiring ownership. 5822 // 5823 // sched.lock must be held. 5824 // 5825 // May run during STW, so write barriers are not allowed. 5826 //go:nowritebarrierrec 5827 func pidleget() *p { 5828 assertLockHeld(&sched.lock) 5829 5830 _p_ := sched.pidle.ptr() 5831 if _p_ != nil { 5832 // Timer may get added at any time now. 
5833 timerpMask.set(_p_.id) 5834 idlepMask.clear(_p_.id) 5835 sched.pidle = _p_.link 5836 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 5837 } 5838 return _p_ 5839 } 5840 5841 // runqempty reports whether _p_ has no Gs on its local run queue. 5842 // It never returns true spuriously. 5843 func runqempty(_p_ *p) bool { 5844 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, 5845 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. 5846 // Simply observing that runqhead == runqtail and then observing that runqnext == nil 5847 // does not mean the queue is empty. 5848 for { 5849 head := atomic.Load(&_p_.runqhead) 5850 tail := atomic.Load(&_p_.runqtail) 5851 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) 5852 if tail == atomic.Load(&_p_.runqtail) { 5853 return head == tail && runnext == 0 5854 } 5855 } 5856 } 5857 5858 // To shake out latent assumptions about scheduling order, 5859 // we introduce some randomness into scheduling decisions 5860 // when running with the race detector. 5861 // The need for this was made obvious by changing the 5862 // (deterministic) scheduling order in Go 1.5 and breaking 5863 // many poorly-written tests. 5864 // With the randomness here, as long as the tests pass 5865 // consistently with -race, they shouldn't have latent scheduling 5866 // assumptions. 5867 const randomizeScheduler = raceenabled 5868 5869 // runqput tries to put g on the local runnable queue. 5870 // If next is false, runqput adds g to the tail of the runnable queue. 5871 // If next is true, runqput puts g in the _p_.runnext slot. 5872 // If the run queue is full, runnext puts g on the global queue. 5873 // Executed only by the owner P. 5874 func runqput(_p_ *p, gp *g, next bool) { 5875 if randomizeScheduler && next && fastrandn(2) == 0 { 5876 next = false 5877 } 5878 5879 if next { 5880 retryNext: 5881 oldnext := _p_.runnext 5882 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 5883 goto retryNext 5884 } 5885 if oldnext == 0 { 5886 return 5887 } 5888 // Kick the old runnext out to the regular run queue. 5889 gp = oldnext.ptr() 5890 } 5891 5892 retry: 5893 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 5894 t := _p_.runqtail 5895 if t-h < uint32(len(_p_.runq)) { 5896 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 5897 atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 5898 return 5899 } 5900 if runqputslow(_p_, gp, h, t) { 5901 return 5902 } 5903 // the queue is not full, now the put above must succeed 5904 goto retry 5905 } 5906 5907 // Put g and a batch of work from local runnable queue on global queue. 5908 // Executed only by the owner P. 5909 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 5910 var batch [len(_p_.runq)/2 + 1]*g 5911 5912 // First, grab a batch from local queue. 5913 n := t - h 5914 n = n / 2 5915 if n != uint32(len(_p_.runq)/2) { 5916 throw("runqputslow: queue is not full") 5917 } 5918 for i := uint32(0); i < n; i++ { 5919 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 5920 } 5921 if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 5922 return false 5923 } 5924 batch[n] = gp 5925 5926 if randomizeScheduler { 5927 for i := uint32(1); i <= n; i++ { 5928 j := fastrandn(i + 1) 5929 batch[i], batch[j] = batch[j], batch[i] 5930 } 5931 } 5932 5933 // Link the goroutines. 
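//
// For the usual case of a full 256-entry local queue, n is 128, so batch holds
// 128 G's taken from the queue plus gp in batch[n], 129 entries in all. The
// loop below threads them into a chain via schedlink; with n = 2 the result is
//
//	batch[0] -> batch[1] -> batch[2] (== gp)
//	q.head = batch[0], q.tail = batch[2]
//
// and globrunqputbatch then splices that whole chain onto sched.runq under a
// single sched.lock acquisition, adding n+1 to sched.runqsize.
//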
5934 for i := uint32(0); i < n; i++ { 5935 batch[i].schedlink.set(batch[i+1]) 5936 } 5937 var q gQueue 5938 q.head.set(batch[0]) 5939 q.tail.set(batch[n]) 5940 5941 // Now put the batch on global queue. 5942 lock(&sched.lock) 5943 globrunqputbatch(&q, int32(n+1)) 5944 unlock(&sched.lock) 5945 return true 5946 } 5947 5948 // runqputbatch tries to put all the G's on q on the local runnable queue. 5949 // If the queue is full, they are put on the global queue; in that case 5950 // this will temporarily acquire the scheduler lock. 5951 // Executed only by the owner P. 5952 func runqputbatch(pp *p, q *gQueue, qsize int) { 5953 h := atomic.LoadAcq(&pp.runqhead) 5954 t := pp.runqtail 5955 n := uint32(0) 5956 for !q.empty() && t-h < uint32(len(pp.runq)) { 5957 gp := q.pop() 5958 pp.runq[t%uint32(len(pp.runq))].set(gp) 5959 t++ 5960 n++ 5961 } 5962 qsize -= int(n) 5963 5964 if randomizeScheduler { 5965 off := func(o uint32) uint32 { 5966 return (pp.runqtail + o) % uint32(len(pp.runq)) 5967 } 5968 for i := uint32(1); i < n; i++ { 5969 j := fastrandn(i + 1) 5970 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)] 5971 } 5972 } 5973 5974 atomic.StoreRel(&pp.runqtail, t) 5975 if !q.empty() { 5976 lock(&sched.lock) 5977 globrunqputbatch(q, int32(qsize)) 5978 unlock(&sched.lock) 5979 } 5980 } 5981 5982 // Get g from local runnable queue. 5983 // If inheritTime is true, gp should inherit the remaining time in the 5984 // current time slice. Otherwise, it should start a new time slice. 5985 // Executed only by the owner P. 5986 func runqget(_p_ *p) (gp *g, inheritTime bool) { 5987 // If there's a runnext, it's the next G to run. 5988 next := _p_.runnext 5989 // If the runnext is non-0 and the CAS fails, it could only have been stolen by another P, 5990 // because other Ps can race to set runnext to 0, but only the current P can set it to non-0. 5991 // Hence, there's no need to retry this CAS if it falls. 5992 if next != 0 && _p_.runnext.cas(next, 0) { 5993 return next.ptr(), true 5994 } 5995 5996 for { 5997 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 5998 t := _p_.runqtail 5999 if t == h { 6000 return nil, false 6001 } 6002 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 6003 if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume 6004 return gp, false 6005 } 6006 } 6007 } 6008 6009 // runqdrain drains the local runnable queue of _p_ and returns all goroutines in it. 6010 // Executed only by the owner P. 6011 func runqdrain(_p_ *p) (drainQ gQueue, n uint32) { 6012 oldNext := _p_.runnext 6013 if oldNext != 0 && _p_.runnext.cas(oldNext, 0) { 6014 drainQ.pushBack(oldNext.ptr()) 6015 n++ 6016 } 6017 6018 retry: 6019 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 6020 t := _p_.runqtail 6021 qn := t - h 6022 if qn == 0 { 6023 return 6024 } 6025 if qn > uint32(len(_p_.runq)) { // read inconsistent h and t 6026 goto retry 6027 } 6028 6029 if !atomic.CasRel(&_p_.runqhead, h, h+qn) { // cas-release, commits consume 6030 goto retry 6031 } 6032 6033 // We've inverted the order in which it gets G's from the local P's runnable queue 6034 // and then advances the head pointer because we don't want to mess up the statuses of G's 6035 // while runqdrain() and runqsteal() are running in parallel. 
6036 // Thus we should advance the head pointer before draining the local P into a gQueue, 6037 // so that we can update any gp.schedlink only after we take the full ownership of G, 6038 // meanwhile, other P's can't access to all G's in local P's runnable queue and steal them. 6039 // See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details. 6040 for i := uint32(0); i < qn; i++ { 6041 gp := _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 6042 drainQ.pushBack(gp) 6043 n++ 6044 } 6045 return 6046 } 6047 6048 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 6049 // Batch is a ring buffer starting at batchHead. 6050 // Returns number of grabbed goroutines. 6051 // Can be executed by any P. 6052 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 6053 for { 6054 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 6055 t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer 6056 n := t - h 6057 n = n - n/2 6058 if n == 0 { 6059 if stealRunNextG { 6060 // Try to steal from _p_.runnext. 6061 if next := _p_.runnext; next != 0 { 6062 if _p_.status == _Prunning { 6063 // Sleep to ensure that _p_ isn't about to run the g 6064 // we are about to steal. 6065 // The important use case here is when the g running 6066 // on _p_ ready()s another g and then almost 6067 // immediately blocks. Instead of stealing runnext 6068 // in this window, back off to give _p_ a chance to 6069 // schedule runnext. This will avoid thrashing gs 6070 // between different Ps. 6071 // A sync chan send/recv takes ~50ns as of time of 6072 // writing, so 3us gives ~50x overshoot. 6073 if GOOS != "windows" { 6074 usleep(3) 6075 } else { 6076 // On windows system timer granularity is 6077 // 1-15ms, which is way too much for this 6078 // optimization. So just yield. 6079 osyield() 6080 } 6081 } 6082 if !_p_.runnext.cas(next, 0) { 6083 continue 6084 } 6085 batch[batchHead%uint32(len(batch))] = next 6086 return 1 6087 } 6088 } 6089 return 0 6090 } 6091 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 6092 continue 6093 } 6094 for i := uint32(0); i < n; i++ { 6095 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 6096 batch[(batchHead+i)%uint32(len(batch))] = g 6097 } 6098 if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 6099 return n 6100 } 6101 } 6102 } 6103 6104 // Steal half of elements from local runnable queue of p2 6105 // and put onto local runnable queue of p. 6106 // Returns one of the stolen elements (or nil if failed). 6107 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 6108 t := _p_.runqtail 6109 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 6110 if n == 0 { 6111 return nil 6112 } 6113 n-- 6114 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 6115 if n == 0 { 6116 return gp 6117 } 6118 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 6119 if t-h+n >= uint32(len(_p_.runq)) { 6120 throw("runqsteal: runq overflow") 6121 } 6122 atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 6123 return gp 6124 } 6125 6126 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only 6127 // be on one gQueue or gList at a time. 6128 type gQueue struct { 6129 head guintptr 6130 tail guintptr 6131 } 6132 6133 // empty reports whether q is empty. 6134 func (q *gQueue) empty() bool { 6135 return q.head == 0 6136 } 6137 6138 // push adds gp to the head of q. 
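//
// gQueue and gList are intrusive: the link field lives in the G itself
// (g.schedlink), so pushing and popping never allocate, and a G can be on at
// most one queue or list at a time because it has only that one link. The
// shape is easy to reproduce outside the runtime; a self-contained sketch in
// which node stands in for g and next for schedlink (names are illustrative):
//
//	package main
//
//	import "fmt"
//
//	type node struct {
//		id   int
//		next *node // the intrusive link, like g.schedlink
//	}
//
//	// queue is a FIFO linked through node.next, mirroring gQueue.
//	type queue struct{ head, tail *node }
//
//	func (q *queue) empty() bool { return q.head == nil }
//
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//	}
//
//	func (q *queue) pop() *node {
//		n := q.head
//		if n != nil {
//			q.head = n.next
//			if q.head == nil {
//				q.tail = nil
//			}
//		}
//		return n
//	}
//
//	func main() {
//		var q queue
//		for i := 1; i <= 3; i++ {
//			q.pushBack(&node{id: i})
//		}
//		for !q.empty() {
//			fmt.Print(q.pop().id, " ") // 1 2 3
//		}
//		fmt.Println()
//	}
//
// pushBack and pop here mirror (*gQueue).pushBack and (*gQueue).pop below;
// push, pushBackAll, popList, and the gList operations follow the same pattern.
//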
6139 func (q *gQueue) push(gp *g) { 6140 gp.schedlink = q.head 6141 q.head.set(gp) 6142 if q.tail == 0 { 6143 q.tail.set(gp) 6144 } 6145 } 6146 6147 // pushBack adds gp to the tail of q. 6148 func (q *gQueue) pushBack(gp *g) { 6149 gp.schedlink = 0 6150 if q.tail != 0 { 6151 q.tail.ptr().schedlink.set(gp) 6152 } else { 6153 q.head.set(gp) 6154 } 6155 q.tail.set(gp) 6156 } 6157 6158 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must 6159 // not be used. 6160 func (q *gQueue) pushBackAll(q2 gQueue) { 6161 if q2.tail == 0 { 6162 return 6163 } 6164 q2.tail.ptr().schedlink = 0 6165 if q.tail != 0 { 6166 q.tail.ptr().schedlink = q2.head 6167 } else { 6168 q.head = q2.head 6169 } 6170 q.tail = q2.tail 6171 } 6172 6173 // pop removes and returns the head of queue q. It returns nil if 6174 // q is empty. 6175 func (q *gQueue) pop() *g { 6176 gp := q.head.ptr() 6177 if gp != nil { 6178 q.head = gp.schedlink 6179 if q.head == 0 { 6180 q.tail = 0 6181 } 6182 } 6183 return gp 6184 } 6185 6186 // popList takes all Gs in q and returns them as a gList. 6187 func (q *gQueue) popList() gList { 6188 stack := gList{q.head} 6189 *q = gQueue{} 6190 return stack 6191 } 6192 6193 // A gList is a list of Gs linked through g.schedlink. A G can only be 6194 // on one gQueue or gList at a time. 6195 type gList struct { 6196 head guintptr 6197 } 6198 6199 // empty reports whether l is empty. 6200 func (l *gList) empty() bool { 6201 return l.head == 0 6202 } 6203 6204 // push adds gp to the head of l. 6205 func (l *gList) push(gp *g) { 6206 gp.schedlink = l.head 6207 l.head.set(gp) 6208 } 6209 6210 // pushAll prepends all Gs in q to l. 6211 func (l *gList) pushAll(q gQueue) { 6212 if !q.empty() { 6213 q.tail.ptr().schedlink = l.head 6214 l.head = q.head 6215 } 6216 } 6217 6218 // pop removes and returns the head of l. If l is empty, it returns nil. 6219 func (l *gList) pop() *g { 6220 gp := l.head.ptr() 6221 if gp != nil { 6222 l.head = gp.schedlink 6223 } 6224 return gp 6225 } 6226 6227 //go:linkname setMaxThreads runtime/debug.setMaxThreads 6228 func setMaxThreads(in int) (out int) { 6229 lock(&sched.lock) 6230 out = int(sched.maxmcount) 6231 if in > 0x7fffffff { // MaxInt32 6232 sched.maxmcount = 0x7fffffff 6233 } else { 6234 sched.maxmcount = int32(in) 6235 } 6236 checkmcount() 6237 unlock(&sched.lock) 6238 return 6239 } 6240 6241 //go:nosplit 6242 func procPin() int { 6243 _g_ := getg() 6244 mp := _g_.m 6245 6246 mp.locks++ 6247 return int(mp.p.ptr().id) 6248 } 6249 6250 //go:nosplit 6251 func procUnpin() { 6252 _g_ := getg() 6253 _g_.m.locks-- 6254 } 6255 6256 //go:linkname sync_runtime_procPin sync.runtime_procPin 6257 //go:nosplit 6258 func sync_runtime_procPin() int { 6259 return procPin() 6260 } 6261 6262 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 6263 //go:nosplit 6264 func sync_runtime_procUnpin() { 6265 procUnpin() 6266 } 6267 6268 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 6269 //go:nosplit 6270 func sync_atomic_runtime_procPin() int { 6271 return procPin() 6272 } 6273 6274 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 6275 //go:nosplit 6276 func sync_atomic_runtime_procUnpin() { 6277 procUnpin() 6278 } 6279 6280 // Active spinning for sync.Mutex. 6281 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 6282 //go:nosplit 6283 func sync_runtime_canSpin(i int) bool { 6284 // sync.Mutex is cooperative, so we are conservative with spinning. 
6285 // Spin only a few times and only if running on a multicore machine and 6286 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 6287 // As opposed to the runtime mutex, we don't do passive spinning here, 6288 // because there can be work on global runq or on other Ps. 6289 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 6290 return false 6291 } 6292 if p := getg().m.p.ptr(); !runqempty(p) { 6293 return false 6294 } 6295 return true 6296 } 6297 6298 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 6299 //go:nosplit 6300 func sync_runtime_doSpin() { 6301 procyield(active_spin_cnt) 6302 } 6303 6304 var stealOrder randomOrder 6305 6306 // randomOrder/randomEnum are helper types for randomized work stealing. 6307 // They allow enumerating all Ps in different pseudo-random orders without repetitions. 6308 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS 6309 // are coprime, then a sequence of (i + X) % GOMAXPROCS gives the required enumeration. 6310 type randomOrder struct { 6311 count uint32 6312 coprimes []uint32 6313 } 6314 6315 type randomEnum struct { 6316 i uint32 6317 count uint32 6318 pos uint32 6319 inc uint32 6320 } 6321 6322 func (ord *randomOrder) reset(count uint32) { 6323 ord.count = count 6324 ord.coprimes = ord.coprimes[:0] 6325 for i := uint32(1); i <= count; i++ { 6326 if gcd(i, count) == 1 { 6327 ord.coprimes = append(ord.coprimes, i) 6328 } 6329 } 6330 } 6331 6332 func (ord *randomOrder) start(i uint32) randomEnum { 6333 return randomEnum{ 6334 count: ord.count, 6335 pos: i % ord.count, 6336 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 6337 } 6338 } 6339 6340 func (enum *randomEnum) done() bool { 6341 return enum.i == enum.count 6342 } 6343 6344 func (enum *randomEnum) next() { 6345 enum.i++ 6346 enum.pos = (enum.pos + enum.inc) % enum.count 6347 } 6348 6349 func (enum *randomEnum) position() uint32 { 6350 return enum.pos 6351 } 6352 6353 func gcd(a, b uint32) uint32 { 6354 for b != 0 { 6355 a, b = b, a%b 6356 } 6357 return a 6358 } 6359 6360 // An initTask represents the set of initializations that need to be done for a package. 6361 // Keep in sync with ../../test/initempty.go:initTask 6362 type initTask struct { 6363 // TODO: pack the first 3 fields more tightly? 6364 state uintptr // 0 = uninitialized, 1 = in progress, 2 = done 6365 ndeps uintptr 6366 nfns uintptr 6367 // followed by ndeps instances of an *initTask, one per package depended on 6368 // followed by nfns pcs, one per init function to run 6369 } 6370 6371 // inittrace stores statistics for init functions which are 6372 // updated by malloc and newproc when active is true.
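//
// Tracing is enabled with GODEBUG=inittrace=1, in which case doInit below
// prints one line per completed package init. The packages, timings, and
// counts in this example run are representative only:
//
//	$ GODEBUG=inittrace=1 ./prog
//	init internal/bytealg @0.008 ms, 0.001 ms clock, 0 bytes, 0 allocs
//	init runtime @0.059 ms, 0.026 ms clock, 0 bytes, 0 allocs
//	init errors @0.38 ms, 0.003 ms clock, 0 bytes, 0 allocs
//	init main @0.93 ms, 0.002 ms clock, 24 bytes, 1 allocs
//
// The "@" column is the offset from runtimeInitTime, "clock" is wall time
// spent in that package's init, and the bytes/allocs deltas come from the
// before/after snapshots of this inittrace variable taken in doInit.
//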
6373 var inittrace tracestat 6374 6375 type tracestat struct { 6376 active bool // init tracing activation status 6377 id int64 // init goroutine id 6378 allocs uint64 // heap allocations 6379 bytes uint64 // heap allocated bytes 6380 } 6381 6382 func doInit(t *initTask) { 6383 switch t.state { 6384 case 2: // fully initialized 6385 return 6386 case 1: // initialization in progress 6387 throw("recursive call during initialization - linker skew") 6388 default: // not initialized yet 6389 t.state = 1 // initialization in progress 6390 6391 for i := uintptr(0); i < t.ndeps; i++ { 6392 p := add(unsafe.Pointer(t), (3+i)*goarch.PtrSize) 6393 t2 := *(**initTask)(p) 6394 doInit(t2) 6395 } 6396 6397 if t.nfns == 0 { 6398 t.state = 2 // initialization done 6399 return 6400 } 6401 6402 var ( 6403 start int64 6404 before tracestat 6405 ) 6406 6407 if inittrace.active { 6408 start = nanotime() 6409 // Load stats non-atomically since inittrace is updated only by this init goroutine. 6410 before = inittrace 6411 } 6412 6413 firstFunc := add(unsafe.Pointer(t), (3+t.ndeps)*goarch.PtrSize) 6414 for i := uintptr(0); i < t.nfns; i++ { 6415 p := add(firstFunc, i*goarch.PtrSize) 6416 f := *(*func())(unsafe.Pointer(&p)) 6417 f() 6418 } 6419 6420 if inittrace.active { 6421 end := nanotime() 6422 // Load stats non-atomically since inittrace is updated only by this init goroutine. 6423 after := inittrace 6424 6425 f := *(*func())(unsafe.Pointer(&firstFunc)) 6426 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f))) 6427 6428 var sbuf [24]byte 6429 print("init ", pkg, " @") 6430 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ") 6431 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ") 6432 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ") 6433 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs") 6434 print("\n") 6435 } 6436 6437 t.state = 2 // initialization done 6438 } 6439 }
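//
// doInit runs a package's dependency tasks before its own init functions, so
// the user-visible ordering follows the language rules: imported packages
// first, then package-level variable initializers, then the package's init
// functions in source order. A small self-contained illustration (output in
// the trailing comment):
//
//	package main
//
//	import "fmt" // fmt's own initTask has already run by the time main's does
//
//	// Package-level variable initializers run before any init function.
//	var greeting = note("var greeting")
//
//	func note(s string) string {
//		fmt.Println(s)
//		return s
//	}
//
//	func init() { fmt.Println("init #1") }
//	func init() { fmt.Println("init #2") } // multiple init funcs run in source order
//
//	func main() {
//		fmt.Println("main")
//	}
//
//	// Output:
//	// var greeting
//	// init #1
//	// init #2
//	// main
//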