github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/proc.go (about) 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "runtime/internal/atomic" 9 "runtime/internal/sys" 10 "unsafe" 11 ) 12 13 // Goroutine scheduler 14 // The scheduler's job is to distribute ready-to-run goroutines over worker threads. 15 // 16 // The main concepts are: 17 // G - goroutine. 18 // M - worker thread, or machine. 19 // P - processor, a resource that is required to execute Go code. 20 // M must have an associated P to execute Go code, however it can be 21 // blocked or in a syscall w/o an associated P. 22 // 23 // Design doc at https://golang.org/s/go11sched. 24 25 var ( 26 m0 m 27 g0 g 28 ) 29 30 //go:linkname runtime_init runtime.init 31 func runtime_init() 32 33 //go:linkname main_init main.init 34 func main_init() 35 36 // main_init_done is a signal used by cgocallbackg that initialization 37 // has been completed. It is made before _cgo_notify_runtime_init_done, 38 // so all cgo calls can rely on it existing. When main_init is complete, 39 // it is closed, meaning cgocallbackg can reliably receive from it. 40 var main_init_done chan bool 41 42 //go:linkname main_main main.main 43 func main_main() 44 45 // runtimeInitTime is the nanotime() at which the runtime started. 46 var runtimeInitTime int64 47 48 // The main goroutine. 49 func main() { 50 g := getg() 51 52 // Racectx of m0->g0 is used only as the parent of the main goroutine. 53 // It must not be used for anything else. 54 g.m.g0.racectx = 0 55 56 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 57 // Using decimal instead of binary GB and MB because 58 // they look nicer in the stack overflow failure message. 59 if sys.PtrSize == 8 { 60 maxstacksize = 1000000000 61 } else { 62 maxstacksize = 250000000 63 } 64 65 // Record when the world started. 66 runtimeInitTime = nanotime() 67 68 systemstack(func() { 69 newm(sysmon, nil) 70 }) 71 72 // Lock the main goroutine onto this, the main OS thread, 73 // during initialization. Most programs won't care, but a few 74 // do require certain calls to be made by the main thread. 75 // Those can arrange for main.main to run in the main thread 76 // by calling runtime.LockOSThread during initialization 77 // to preserve the lock. 78 lockOSThread() 79 80 if g.m != &m0 { 81 throw("runtime.main not on m0") 82 } 83 84 runtime_init() // must be before defer 85 86 // Defer unlock so that runtime.Goexit during init does the unlock too. 87 needUnlock := true 88 defer func() { 89 if needUnlock { 90 unlockOSThread() 91 } 92 }() 93 94 gcenable() 95 96 main_init_done = make(chan bool) 97 if iscgo { 98 if _cgo_thread_start == nil { 99 throw("_cgo_thread_start missing") 100 } 101 if _cgo_malloc == nil { 102 throw("_cgo_malloc missing") 103 } 104 if _cgo_free == nil { 105 throw("_cgo_free missing") 106 } 107 if GOOS != "windows" { 108 if _cgo_setenv == nil { 109 throw("_cgo_setenv missing") 110 } 111 if _cgo_unsetenv == nil { 112 throw("_cgo_unsetenv missing") 113 } 114 } 115 if _cgo_notify_runtime_init_done == nil { 116 throw("_cgo_notify_runtime_init_done missing") 117 } 118 cgocall(_cgo_notify_runtime_init_done, nil) 119 } 120 121 main_init() 122 close(main_init_done) 123 124 needUnlock = false 125 unlockOSThread() 126 127 if isarchive || islibrary { 128 // A program compiled with -buildmode=c-archive or c-shared 129 // has a main, but it is not executed. 130 return 131 } 132 main_main() 133 if raceenabled { 134 racefini() 135 } 136 137 // Make racy client program work: if panicking on 138 // another goroutine at the same time as main returns, 139 // let the other goroutine finish printing the panic trace. 140 // Once it does, it will exit. See issue 3934. 141 if panicking != 0 { 142 gopark(nil, nil, "panicwait", traceEvGoStop, 1) 143 } 144 145 exit(0) 146 for { 147 var x *int32 148 *x = 0 149 } 150 } 151 152 // os_beforeExit is called from os.Exit(0). 153 //go:linkname os_beforeExit os.runtime_beforeExit 154 func os_beforeExit() { 155 if raceenabled { 156 racefini() 157 } 158 } 159 160 // start forcegc helper goroutine 161 func init() { 162 go forcegchelper() 163 } 164 165 func forcegchelper() { 166 forcegc.g = getg() 167 for { 168 lock(&forcegc.lock) 169 if forcegc.idle != 0 { 170 throw("forcegc: phase error") 171 } 172 atomic.Store(&forcegc.idle, 1) 173 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1) 174 // this goroutine is explicitly resumed by sysmon 175 if debug.gctrace > 0 { 176 println("GC forced") 177 } 178 gcStart(gcBackgroundMode, true) 179 } 180 } 181 182 //go:nosplit 183 184 // Gosched yields the processor, allowing other goroutines to run. It does not 185 // suspend the current goroutine, so execution resumes automatically. 186 func Gosched() { 187 mcall(gosched_m) 188 } 189 190 // Puts the current goroutine into a waiting state and calls unlockf. 191 // If unlockf returns false, the goroutine is resumed. 192 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) { 193 mp := acquirem() 194 gp := mp.curg 195 status := readgstatus(gp) 196 if status != _Grunning && status != _Gscanrunning { 197 throw("gopark: bad g status") 198 } 199 mp.waitlock = lock 200 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf)) 201 gp.waitreason = reason 202 mp.waittraceev = traceEv 203 mp.waittraceskip = traceskip 204 releasem(mp) 205 // can't do anything that might move the G between Ms here. 206 mcall(park_m) 207 } 208 209 // Puts the current goroutine into a waiting state and unlocks the lock. 210 // The goroutine can be made runnable again by calling goready(gp). 211 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) { 212 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 213 } 214 215 func goready(gp *g, traceskip int) { 216 systemstack(func() { 217 ready(gp, traceskip) 218 }) 219 } 220 221 //go:nosplit 222 func acquireSudog() *sudog { 223 // Delicate dance: the semaphore implementation calls 224 // acquireSudog, acquireSudog calls new(sudog), 225 // new calls malloc, malloc can call the garbage collector, 226 // and the garbage collector calls the semaphore implementation 227 // in stopTheWorld. 228 // Break the cycle by doing acquirem/releasem around new(sudog). 229 // The acquirem/releasem increments m.locks during new(sudog), 230 // which keeps the garbage collector from being invoked. 231 mp := acquirem() 232 pp := mp.p.ptr() 233 if len(pp.sudogcache) == 0 { 234 lock(&sched.sudoglock) 235 // First, try to grab a batch from central cache. 236 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 237 s := sched.sudogcache 238 sched.sudogcache = s.next 239 s.next = nil 240 pp.sudogcache = append(pp.sudogcache, s) 241 } 242 unlock(&sched.sudoglock) 243 // If the central cache is empty, allocate a new one. 244 if len(pp.sudogcache) == 0 { 245 pp.sudogcache = append(pp.sudogcache, new(sudog)) 246 } 247 } 248 n := len(pp.sudogcache) 249 s := pp.sudogcache[n-1] 250 pp.sudogcache[n-1] = nil 251 pp.sudogcache = pp.sudogcache[:n-1] 252 if s.elem != nil { 253 throw("acquireSudog: found s.elem != nil in cache") 254 } 255 releasem(mp) 256 return s 257 } 258 259 //go:nosplit 260 func releaseSudog(s *sudog) { 261 if s.elem != nil { 262 throw("runtime: sudog with non-nil elem") 263 } 264 if s.selectdone != nil { 265 throw("runtime: sudog with non-nil selectdone") 266 } 267 if s.next != nil { 268 throw("runtime: sudog with non-nil next") 269 } 270 if s.prev != nil { 271 throw("runtime: sudog with non-nil prev") 272 } 273 if s.waitlink != nil { 274 throw("runtime: sudog with non-nil waitlink") 275 } 276 gp := getg() 277 if gp.param != nil { 278 throw("runtime: releaseSudog with non-nil gp.param") 279 } 280 mp := acquirem() // avoid rescheduling to another P 281 pp := mp.p.ptr() 282 if len(pp.sudogcache) == cap(pp.sudogcache) { 283 // Transfer half of local cache to the central cache. 284 var first, last *sudog 285 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 286 n := len(pp.sudogcache) 287 p := pp.sudogcache[n-1] 288 pp.sudogcache[n-1] = nil 289 pp.sudogcache = pp.sudogcache[:n-1] 290 if first == nil { 291 first = p 292 } else { 293 last.next = p 294 } 295 last = p 296 } 297 lock(&sched.sudoglock) 298 last.next = sched.sudogcache 299 sched.sudogcache = first 300 unlock(&sched.sudoglock) 301 } 302 pp.sudogcache = append(pp.sudogcache, s) 303 releasem(mp) 304 } 305 306 // funcPC returns the entry PC of the function f. 307 // It assumes that f is a func value. Otherwise the behavior is undefined. 308 //go:nosplit 309 func funcPC(f interface{}) uintptr { 310 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) 311 } 312 313 // called from assembly 314 func badmcall(fn func(*g)) { 315 throw("runtime: mcall called on m->g0 stack") 316 } 317 318 func badmcall2(fn func(*g)) { 319 throw("runtime: mcall function returned") 320 } 321 322 func badreflectcall() { 323 panic("runtime: arg size to reflect.call more than 1GB") 324 } 325 326 func lockedOSThread() bool { 327 gp := getg() 328 return gp.lockedm != nil && gp.m.lockedg != nil 329 } 330 331 var ( 332 allgs []*g 333 allglock mutex 334 ) 335 336 func allgadd(gp *g) { 337 if readgstatus(gp) == _Gidle { 338 throw("allgadd: bad status Gidle") 339 } 340 341 lock(&allglock) 342 allgs = append(allgs, gp) 343 allglen = uintptr(len(allgs)) 344 unlock(&allglock) 345 } 346 347 const ( 348 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 349 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 350 _GoidCacheBatch = 16 351 ) 352 353 // The bootstrap sequence is: 354 // 355 // call osinit 356 // call schedinit 357 // make & queue new G 358 // call runtime·mstart 359 // 360 // The new G calls runtime·main. 361 func schedinit() { 362 // raceinit must be the first call to race detector. 363 // In particular, it must be done before mallocinit below calls racemapshadow. 364 _g_ := getg() 365 if raceenabled { 366 _g_.racectx = raceinit() 367 } 368 369 sched.maxmcount = 10000 370 371 // Cache the framepointer experiment. This affects stack unwinding. 372 framepointer_enabled = haveexperiment("framepointer") 373 374 tracebackinit() 375 moduledataverify() 376 stackinit() 377 mallocinit() 378 mcommoninit(_g_.m) 379 380 goargs() 381 goenvs() 382 parsedebugvars() 383 gcinit() 384 385 sched.lastpoll = uint64(nanotime()) 386 procs := int(ncpu) 387 if n := atoi(gogetenv("GOMAXPROCS")); n > 0 { 388 if n > _MaxGomaxprocs { 389 n = _MaxGomaxprocs 390 } 391 procs = n 392 } 393 if procresize(int32(procs)) != nil { 394 throw("unknown runnable goroutine during bootstrap") 395 } 396 397 if sys.BuildVersion == "" { 398 // Condition should never trigger. This code just serves 399 // to ensure runtime·buildVersion is kept in the resulting binary. 400 sys.BuildVersion = "unknown" 401 } 402 } 403 404 func dumpgstatus(gp *g) { 405 _g_ := getg() 406 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 407 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 408 } 409 410 func checkmcount() { 411 // sched lock is held 412 if sched.mcount > sched.maxmcount { 413 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 414 throw("thread exhaustion") 415 } 416 } 417 418 func mcommoninit(mp *m) { 419 _g_ := getg() 420 421 // g0 stack won't make sense for user (and is not necessary unwindable). 422 if _g_ != _g_.m.g0 { 423 callers(1, mp.createstack[:]) 424 } 425 426 mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks()) 427 if mp.fastrand == 0 { 428 mp.fastrand = 0x49f6428a 429 } 430 431 lock(&sched.lock) 432 mp.id = sched.mcount 433 sched.mcount++ 434 checkmcount() 435 mpreinit(mp) 436 if mp.gsignal != nil { 437 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 438 } 439 440 // Add to allm so garbage collector doesn't free g->m 441 // when it is just in a register or thread-local storage. 442 mp.alllink = allm 443 444 // NumCgoCall() iterates over allm w/o schedlock, 445 // so we need to publish it safely. 446 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 447 unlock(&sched.lock) 448 } 449 450 // Mark gp ready to run. 451 func ready(gp *g, traceskip int) { 452 if trace.enabled { 453 traceGoUnpark(gp, traceskip) 454 } 455 456 status := readgstatus(gp) 457 458 // Mark runnable. 459 _g_ := getg() 460 _g_.m.locks++ // disable preemption because it can be holding p in a local var 461 if status&^_Gscan != _Gwaiting { 462 dumpgstatus(gp) 463 throw("bad g->status in ready") 464 } 465 466 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 467 casgstatus(gp, _Gwaiting, _Grunnable) 468 runqput(_g_.m.p.ptr(), gp, true) 469 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic 470 wakep() 471 } 472 _g_.m.locks-- 473 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in Case we've cleared it in newstack 474 _g_.stackguard0 = stackPreempt 475 } 476 } 477 478 func gcprocs() int32 { 479 // Figure out how many CPUs to use during GC. 480 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 481 lock(&sched.lock) 482 n := gomaxprocs 483 if n > ncpu { 484 n = ncpu 485 } 486 if n > _MaxGcproc { 487 n = _MaxGcproc 488 } 489 if n > sched.nmidle+1 { // one M is currently running 490 n = sched.nmidle + 1 491 } 492 unlock(&sched.lock) 493 return n 494 } 495 496 func needaddgcproc() bool { 497 lock(&sched.lock) 498 n := gomaxprocs 499 if n > ncpu { 500 n = ncpu 501 } 502 if n > _MaxGcproc { 503 n = _MaxGcproc 504 } 505 n -= sched.nmidle + 1 // one M is currently running 506 unlock(&sched.lock) 507 return n > 0 508 } 509 510 func helpgc(nproc int32) { 511 _g_ := getg() 512 lock(&sched.lock) 513 pos := 0 514 for n := int32(1); n < nproc; n++ { // one M is currently running 515 if allp[pos].mcache == _g_.m.mcache { 516 pos++ 517 } 518 mp := mget() 519 if mp == nil { 520 throw("gcprocs inconsistency") 521 } 522 mp.helpgc = n 523 mp.p.set(allp[pos]) 524 mp.mcache = allp[pos].mcache 525 pos++ 526 notewakeup(&mp.park) 527 } 528 unlock(&sched.lock) 529 } 530 531 // freezeStopWait is a large value that freezetheworld sets 532 // sched.stopwait to in order to request that all Gs permanently stop. 533 const freezeStopWait = 0x7fffffff 534 535 // Similar to stopTheWorld but best-effort and can be called several times. 536 // There is no reverse operation, used during crashing. 537 // This function must not lock any mutexes. 538 func freezetheworld() { 539 // stopwait and preemption requests can be lost 540 // due to races with concurrently executing threads, 541 // so try several times 542 for i := 0; i < 5; i++ { 543 // this should tell the scheduler to not start any new goroutines 544 sched.stopwait = freezeStopWait 545 atomic.Store(&sched.gcwaiting, 1) 546 // this should stop running goroutines 547 if !preemptall() { 548 break // no running goroutines 549 } 550 usleep(1000) 551 } 552 // to be sure 553 usleep(1000) 554 preemptall() 555 usleep(1000) 556 } 557 558 func isscanstatus(status uint32) bool { 559 if status == _Gscan { 560 throw("isscanstatus: Bad status Gscan") 561 } 562 return status&_Gscan == _Gscan 563 } 564 565 // All reads and writes of g's status go through readgstatus, casgstatus 566 // castogscanstatus, casfrom_Gscanstatus. 567 //go:nosplit 568 func readgstatus(gp *g) uint32 { 569 return atomic.Load(&gp.atomicstatus) 570 } 571 572 // Ownership of gscanvalid: 573 // 574 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan), 575 // then gp owns gp.gscanvalid, and other goroutines must not modify it. 576 // 577 // Otherwise, a second goroutine can lock the scan state by setting _Gscan 578 // in the status bit and then modify gscanvalid, and then unlock the scan state. 579 // 580 // Note that the first condition implies an exception to the second: 581 // if a second goroutine changes gp's status to _Grunning|_Gscan, 582 // that second goroutine still does not have the right to modify gscanvalid. 583 584 // The Gscanstatuses are acting like locks and this releases them. 585 // If it proves to be a performance hit we should be able to make these 586 // simple atomic stores but for now we are going to throw if 587 // we see an inconsistent state. 588 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 589 success := false 590 591 // Check that transition is valid. 592 switch oldval { 593 default: 594 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 595 dumpgstatus(gp) 596 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 597 case _Gscanrunnable, 598 _Gscanwaiting, 599 _Gscanrunning, 600 _Gscansyscall: 601 if newval == oldval&^_Gscan { 602 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 603 } 604 case _Gscanenqueue: 605 if newval == _Gwaiting { 606 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 607 } 608 } 609 if !success { 610 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 611 dumpgstatus(gp) 612 throw("casfrom_Gscanstatus: gp->status is not in scan state") 613 } 614 if newval == _Grunning { 615 gp.gcscanvalid = false 616 } 617 } 618 619 // This will return false if the gp is not in the expected status and the cas fails. 620 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 621 func castogscanstatus(gp *g, oldval, newval uint32) bool { 622 switch oldval { 623 case _Grunnable, 624 _Gwaiting, 625 _Gsyscall: 626 if newval == oldval|_Gscan { 627 return atomic.Cas(&gp.atomicstatus, oldval, newval) 628 } 629 case _Grunning: 630 if newval == _Gscanrunning || newval == _Gscanenqueue { 631 return atomic.Cas(&gp.atomicstatus, oldval, newval) 632 } 633 } 634 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 635 throw("castogscanstatus") 636 panic("not reached") 637 } 638 639 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 640 // and casfrom_Gscanstatus instead. 641 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 642 // put it in the Gscan state is finished. 643 //go:nosplit 644 func casgstatus(gp *g, oldval, newval uint32) { 645 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 646 systemstack(func() { 647 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 648 throw("casgstatus: bad incoming values") 649 }) 650 } 651 652 if oldval == _Grunning && gp.gcscanvalid { 653 // If oldvall == _Grunning, then the actual status must be 654 // _Grunning or _Grunning|_Gscan; either way, 655 // we own gp.gcscanvalid, so it's safe to read. 656 // gp.gcscanvalid must not be true when we are running. 657 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n") 658 throw("casgstatus") 659 } 660 661 // loop if gp->atomicstatus is in a scan state giving 662 // GC time to finish and change the state to oldval. 663 for !atomic.Cas(&gp.atomicstatus, oldval, newval) { 664 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 665 systemstack(func() { 666 throw("casgstatus: waiting for Gwaiting but is Grunnable") 667 }) 668 } 669 // Help GC if needed. 670 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { 671 // gp.preemptscan = false 672 // systemstack(func() { 673 // gcphasework(gp) 674 // }) 675 // } 676 } 677 if newval == _Grunning { 678 gp.gcscanvalid = false 679 } 680 } 681 682 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 683 // Returns old status. Cannot call casgstatus directly, because we are racing with an 684 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 685 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 686 // it would loop waiting for the status to go back to Gwaiting, which it never will. 687 //go:nosplit 688 func casgcopystack(gp *g) uint32 { 689 for { 690 oldstatus := readgstatus(gp) &^ _Gscan 691 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 692 throw("copystack: bad status, not Gwaiting or Grunnable") 693 } 694 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 695 return oldstatus 696 } 697 } 698 } 699 700 // scang blocks until gp's stack has been scanned. 701 // It might be scanned by scang or it might be scanned by the goroutine itself. 702 // Either way, the stack scan has completed when scang returns. 703 func scang(gp *g) { 704 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone. 705 // Nothing is racing with us now, but gcscandone might be set to true left over 706 // from an earlier round of stack scanning (we scan twice per GC). 707 // We use gcscandone to record whether the scan has been done during this round. 708 // It is important that the scan happens exactly once: if called twice, 709 // the installation of stack barriers will detect the double scan and die. 710 711 gp.gcscandone = false 712 713 // Endeavor to get gcscandone set to true, 714 // either by doing the stack scan ourselves or by coercing gp to scan itself. 715 // gp.gcscandone can transition from false to true when we're not looking 716 // (if we asked for preemption), so any time we lock the status using 717 // castogscanstatus we have to double-check that the scan is still not done. 718 for !gp.gcscandone { 719 switch s := readgstatus(gp); s { 720 default: 721 dumpgstatus(gp) 722 throw("stopg: invalid status") 723 724 case _Gdead: 725 // No stack. 726 gp.gcscandone = true 727 728 case _Gcopystack: 729 // Stack being switched. Go around again. 730 731 case _Grunnable, _Gsyscall, _Gwaiting: 732 // Claim goroutine by setting scan bit. 733 // Racing with execution or readying of gp. 734 // The scan bit keeps them from running 735 // the goroutine until we're done. 736 if castogscanstatus(gp, s, s|_Gscan) { 737 if !gp.gcscandone { 738 scanstack(gp) 739 gp.gcscandone = true 740 } 741 restartg(gp) 742 } 743 744 case _Gscanwaiting: 745 // newstack is doing a scan for us right now. Wait. 746 747 case _Grunning: 748 // Goroutine running. Try to preempt execution so it can scan itself. 749 // The preemption handler (in newstack) does the actual scan. 750 751 // Optimization: if there is already a pending preemption request 752 // (from the previous loop iteration), don't bother with the atomics. 753 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt { 754 break 755 } 756 757 // Ask for preemption and self scan. 758 if castogscanstatus(gp, _Grunning, _Gscanrunning) { 759 if !gp.gcscandone { 760 gp.preemptscan = true 761 gp.preempt = true 762 gp.stackguard0 = stackPreempt 763 } 764 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning) 765 } 766 } 767 } 768 769 gp.preemptscan = false // cancel scan request if no longer needed 770 } 771 772 // The GC requests that this routine be moved from a scanmumble state to a mumble state. 773 func restartg(gp *g) { 774 s := readgstatus(gp) 775 switch s { 776 default: 777 dumpgstatus(gp) 778 throw("restartg: unexpected status") 779 780 case _Gdead: 781 // ok 782 783 case _Gscanrunnable, 784 _Gscanwaiting, 785 _Gscansyscall: 786 casfrom_Gscanstatus(gp, s, s&^_Gscan) 787 788 // Scan is now completed. 789 // Goroutine now needs to be made runnable. 790 // We put it on the global run queue; ready blocks on the global scheduler lock. 791 case _Gscanenqueue: 792 casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting) 793 if gp != getg().m.curg { 794 throw("processing Gscanenqueue on wrong m") 795 } 796 dropg() 797 ready(gp, 0) 798 } 799 } 800 801 // stopTheWorld stops all P's from executing goroutines, interrupting 802 // all goroutines at GC safe points and records reason as the reason 803 // for the stop. On return, only the current goroutine's P is running. 804 // stopTheWorld must not be called from a system stack and the caller 805 // must not hold worldsema. The caller must call startTheWorld when 806 // other P's should resume execution. 807 // 808 // stopTheWorld is safe for multiple goroutines to call at the 809 // same time. Each will execute its own stop, and the stops will 810 // be serialized. 811 // 812 // This is also used by routines that do stack dumps. If the system is 813 // in panic or being exited, this may not reliably stop all 814 // goroutines. 815 func stopTheWorld(reason string) { 816 semacquire(&worldsema, false) 817 getg().m.preemptoff = reason 818 systemstack(stopTheWorldWithSema) 819 } 820 821 // startTheWorld undoes the effects of stopTheWorld. 822 func startTheWorld() { 823 systemstack(startTheWorldWithSema) 824 // worldsema must be held over startTheWorldWithSema to ensure 825 // gomaxprocs cannot change while worldsema is held. 826 semrelease(&worldsema) 827 getg().m.preemptoff = "" 828 } 829 830 // Holding worldsema grants an M the right to try to stop the world 831 // and prevents gomaxprocs from changing concurrently. 832 var worldsema uint32 = 1 833 834 // stopTheWorldWithSema is the core implementation of stopTheWorld. 835 // The caller is responsible for acquiring worldsema and disabling 836 // preemption first and then should stopTheWorldWithSema on the system 837 // stack: 838 // 839 // semacquire(&worldsema, false) 840 // m.preemptoff = "reason" 841 // systemstack(stopTheWorldWithSema) 842 // 843 // When finished, the caller must either call startTheWorld or undo 844 // these three operations separately: 845 // 846 // m.preemptoff = "" 847 // systemstack(startTheWorldWithSema) 848 // semrelease(&worldsema) 849 // 850 // It is allowed to acquire worldsema once and then execute multiple 851 // startTheWorldWithSema/stopTheWorldWithSema pairs. 852 // Other P's are able to execute between successive calls to 853 // startTheWorldWithSema and stopTheWorldWithSema. 854 // Holding worldsema causes any other goroutines invoking 855 // stopTheWorld to block. 856 func stopTheWorldWithSema() { 857 _g_ := getg() 858 859 // If we hold a lock, then we won't be able to stop another M 860 // that is blocked trying to acquire the lock. 861 if _g_.m.locks > 0 { 862 throw("stopTheWorld: holding locks") 863 } 864 865 lock(&sched.lock) 866 sched.stopwait = gomaxprocs 867 atomic.Store(&sched.gcwaiting, 1) 868 preemptall() 869 // stop current P 870 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 871 sched.stopwait-- 872 // try to retake all P's in Psyscall status 873 for i := 0; i < int(gomaxprocs); i++ { 874 p := allp[i] 875 s := p.status 876 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 877 if trace.enabled { 878 traceGoSysBlock(p) 879 traceProcStop(p) 880 } 881 p.syscalltick++ 882 sched.stopwait-- 883 } 884 } 885 // stop idle P's 886 for { 887 p := pidleget() 888 if p == nil { 889 break 890 } 891 p.status = _Pgcstop 892 sched.stopwait-- 893 } 894 wait := sched.stopwait > 0 895 unlock(&sched.lock) 896 897 // wait for remaining P's to stop voluntarily 898 if wait { 899 for { 900 // wait for 100us, then try to re-preempt in case of any races 901 if notetsleep(&sched.stopnote, 100*1000) { 902 noteclear(&sched.stopnote) 903 break 904 } 905 preemptall() 906 } 907 } 908 if sched.stopwait != 0 { 909 throw("stopTheWorld: not stopped") 910 } 911 for i := 0; i < int(gomaxprocs); i++ { 912 p := allp[i] 913 if p.status != _Pgcstop { 914 throw("stopTheWorld: not stopped") 915 } 916 } 917 } 918 919 func mhelpgc() { 920 _g_ := getg() 921 _g_.m.helpgc = -1 922 } 923 924 func startTheWorldWithSema() { 925 _g_ := getg() 926 927 _g_.m.locks++ // disable preemption because it can be holding p in a local var 928 gp := netpoll(false) // non-blocking 929 injectglist(gp) 930 add := needaddgcproc() 931 lock(&sched.lock) 932 933 procs := gomaxprocs 934 if newprocs != 0 { 935 procs = newprocs 936 newprocs = 0 937 } 938 p1 := procresize(procs) 939 sched.gcwaiting = 0 940 if sched.sysmonwait != 0 { 941 sched.sysmonwait = 0 942 notewakeup(&sched.sysmonnote) 943 } 944 unlock(&sched.lock) 945 946 for p1 != nil { 947 p := p1 948 p1 = p1.link.ptr() 949 if p.m != 0 { 950 mp := p.m.ptr() 951 p.m = 0 952 if mp.nextp != 0 { 953 throw("startTheWorld: inconsistent mp->nextp") 954 } 955 mp.nextp.set(p) 956 notewakeup(&mp.park) 957 } else { 958 // Start M to run P. Do not start another M below. 959 newm(nil, p) 960 add = false 961 } 962 } 963 964 // Wakeup an additional proc in case we have excessive runnable goroutines 965 // in local queues or in the global queue. If we don't, the proc will park itself. 966 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 967 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 968 wakep() 969 } 970 971 if add { 972 // If GC could have used another helper proc, start one now, 973 // in the hope that it will be available next time. 974 // It would have been even better to start it before the collection, 975 // but doing so requires allocating memory, so it's tricky to 976 // coordinate. This lazy approach works out in practice: 977 // we don't mind if the first couple gc rounds don't have quite 978 // the maximum number of procs. 979 newm(mhelpgc, nil) 980 } 981 _g_.m.locks-- 982 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 983 _g_.stackguard0 = stackPreempt 984 } 985 } 986 987 // Called to start an M. 988 //go:nosplit 989 func mstart() { 990 _g_ := getg() 991 992 if _g_.stack.lo == 0 { 993 // Initialize stack bounds from system stack. 994 // Cgo may have left stack size in stack.hi. 995 size := _g_.stack.hi 996 if size == 0 { 997 size = 8192 * sys.StackGuardMultiplier 998 } 999 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1000 _g_.stack.lo = _g_.stack.hi - size + 1024 1001 } 1002 // Initialize stack guards so that we can start calling 1003 // both Go and C functions with stack growth prologues. 1004 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1005 _g_.stackguard1 = _g_.stackguard0 1006 mstart1() 1007 } 1008 1009 func mstart1() { 1010 _g_ := getg() 1011 1012 if _g_ != _g_.m.g0 { 1013 throw("bad runtime·mstart") 1014 } 1015 1016 // Record top of stack for use by mcall. 1017 // Once we call schedule we're never coming back, 1018 // so other calls can reuse this stack space. 1019 gosave(&_g_.m.g0.sched) 1020 _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used 1021 asminit() 1022 minit() 1023 1024 // Install signal handlers; after minit so that minit can 1025 // prepare the thread to be able to handle the signals. 1026 if _g_.m == &m0 { 1027 // Create an extra M for callbacks on threads not created by Go. 1028 if iscgo && !cgoHasExtraM { 1029 cgoHasExtraM = true 1030 newextram() 1031 } 1032 initsig() 1033 } 1034 1035 if fn := _g_.m.mstartfn; fn != nil { 1036 fn() 1037 } 1038 1039 if _g_.m.helpgc != 0 { 1040 _g_.m.helpgc = 0 1041 stopm() 1042 } else if _g_.m != &m0 { 1043 acquirep(_g_.m.nextp.ptr()) 1044 _g_.m.nextp = 0 1045 } 1046 schedule() 1047 } 1048 1049 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1050 // If a P is currently executing code, this will bring the P to a GC 1051 // safe point and execute fn on that P. If the P is not executing code 1052 // (it is idle or in a syscall), this will call fn(p) directly while 1053 // preventing the P from exiting its state. This does not ensure that 1054 // fn will run on every CPU executing Go code, but it acts as a global 1055 // memory barrier. GC uses this as a "ragged barrier." 1056 // 1057 // The caller must hold worldsema. 1058 // 1059 //go:systemstack 1060 func forEachP(fn func(*p)) { 1061 mp := acquirem() 1062 _p_ := getg().m.p.ptr() 1063 1064 lock(&sched.lock) 1065 if sched.safePointWait != 0 { 1066 throw("forEachP: sched.safePointWait != 0") 1067 } 1068 sched.safePointWait = gomaxprocs - 1 1069 sched.safePointFn = fn 1070 1071 // Ask all Ps to run the safe point function. 1072 for _, p := range allp[:gomaxprocs] { 1073 if p != _p_ { 1074 atomic.Store(&p.runSafePointFn, 1) 1075 } 1076 } 1077 preemptall() 1078 1079 // Any P entering _Pidle or _Psyscall from now on will observe 1080 // p.runSafePointFn == 1 and will call runSafePointFn when 1081 // changing its status to _Pidle/_Psyscall. 1082 1083 // Run safe point function for all idle Ps. sched.pidle will 1084 // not change because we hold sched.lock. 1085 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1086 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1087 fn(p) 1088 sched.safePointWait-- 1089 } 1090 } 1091 1092 wait := sched.safePointWait > 0 1093 unlock(&sched.lock) 1094 1095 // Run fn for the current P. 1096 fn(_p_) 1097 1098 // Force Ps currently in _Psyscall into _Pidle and hand them 1099 // off to induce safe point function execution. 1100 for i := 0; i < int(gomaxprocs); i++ { 1101 p := allp[i] 1102 s := p.status 1103 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1104 if trace.enabled { 1105 traceGoSysBlock(p) 1106 traceProcStop(p) 1107 } 1108 p.syscalltick++ 1109 handoffp(p) 1110 } 1111 } 1112 1113 // Wait for remaining Ps to run fn. 1114 if wait { 1115 for { 1116 // Wait for 100us, then try to re-preempt in 1117 // case of any races. 1118 // 1119 // Requires system stack. 1120 if notetsleep(&sched.safePointNote, 100*1000) { 1121 noteclear(&sched.safePointNote) 1122 break 1123 } 1124 preemptall() 1125 } 1126 } 1127 if sched.safePointWait != 0 { 1128 throw("forEachP: not done") 1129 } 1130 for i := 0; i < int(gomaxprocs); i++ { 1131 p := allp[i] 1132 if p.runSafePointFn != 0 { 1133 throw("forEachP: P did not run fn") 1134 } 1135 } 1136 1137 lock(&sched.lock) 1138 sched.safePointFn = nil 1139 unlock(&sched.lock) 1140 releasem(mp) 1141 } 1142 1143 // runSafePointFn runs the safe point function, if any, for this P. 1144 // This should be called like 1145 // 1146 // if getg().m.p.runSafePointFn != 0 { 1147 // runSafePointFn() 1148 // } 1149 // 1150 // runSafePointFn must be checked on any transition in to _Pidle or 1151 // _Psyscall to avoid a race where forEachP sees that the P is running 1152 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1153 // nor the P run the safe-point function. 1154 func runSafePointFn() { 1155 p := getg().m.p.ptr() 1156 // Resolve the race between forEachP running the safe-point 1157 // function on this P's behalf and this P running the 1158 // safe-point function directly. 1159 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1160 return 1161 } 1162 sched.safePointFn(p) 1163 lock(&sched.lock) 1164 sched.safePointWait-- 1165 if sched.safePointWait == 0 { 1166 notewakeup(&sched.safePointNote) 1167 } 1168 unlock(&sched.lock) 1169 } 1170 1171 // When running with cgo, we call _cgo_thread_start 1172 // to start threads for us so that we can play nicely with 1173 // foreign code. 1174 var cgoThreadStart unsafe.Pointer 1175 1176 type cgothreadstart struct { 1177 g guintptr 1178 tls *uint64 1179 fn unsafe.Pointer 1180 } 1181 1182 // Allocate a new m unassociated with any thread. 1183 // Can use p for allocation context if needed. 1184 // fn is recorded as the new m's m.mstartfn. 1185 // 1186 // This function it known to the compiler to inhibit the 1187 // go:nowritebarrierrec annotation because it uses P for allocation. 1188 func allocm(_p_ *p, fn func()) *m { 1189 _g_ := getg() 1190 _g_.m.locks++ // disable GC because it can be called from sysmon 1191 if _g_.m.p == 0 { 1192 acquirep(_p_) // temporarily borrow p for mallocs in this function 1193 } 1194 mp := new(m) 1195 mp.mstartfn = fn 1196 mcommoninit(mp) 1197 1198 // In case of cgo or Solaris, pthread_create will make us a stack. 1199 // Windows and Plan 9 will layout sched stack on OS stack. 1200 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" { 1201 mp.g0 = malg(-1) 1202 } else { 1203 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1204 } 1205 mp.g0.m = mp 1206 1207 if _p_ == _g_.m.p.ptr() { 1208 releasep() 1209 } 1210 _g_.m.locks-- 1211 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1212 _g_.stackguard0 = stackPreempt 1213 } 1214 1215 return mp 1216 } 1217 1218 // needm is called when a cgo callback happens on a 1219 // thread without an m (a thread not created by Go). 1220 // In this case, needm is expected to find an m to use 1221 // and return with m, g initialized correctly. 1222 // Since m and g are not set now (likely nil, but see below) 1223 // needm is limited in what routines it can call. In particular 1224 // it can only call nosplit functions (textflag 7) and cannot 1225 // do any scheduling that requires an m. 1226 // 1227 // In order to avoid needing heavy lifting here, we adopt 1228 // the following strategy: there is a stack of available m's 1229 // that can be stolen. Using compare-and-swap 1230 // to pop from the stack has ABA races, so we simulate 1231 // a lock by doing an exchange (via casp) to steal the stack 1232 // head and replace the top pointer with MLOCKED (1). 1233 // This serves as a simple spin lock that we can use even 1234 // without an m. The thread that locks the stack in this way 1235 // unlocks the stack by storing a valid stack head pointer. 1236 // 1237 // In order to make sure that there is always an m structure 1238 // available to be stolen, we maintain the invariant that there 1239 // is always one more than needed. At the beginning of the 1240 // program (if cgo is in use) the list is seeded with a single m. 1241 // If needm finds that it has taken the last m off the list, its job 1242 // is - once it has installed its own m so that it can do things like 1243 // allocate memory - to create a spare m and put it on the list. 1244 // 1245 // Each of these extra m's also has a g0 and a curg that are 1246 // pressed into service as the scheduling stack and current 1247 // goroutine for the duration of the cgo callback. 1248 // 1249 // When the callback is done with the m, it calls dropm to 1250 // put the m back on the list. 1251 //go:nosplit 1252 func needm(x byte) { 1253 if iscgo && !cgoHasExtraM { 1254 // Can happen if C/C++ code calls Go from a global ctor. 1255 // Can not throw, because scheduler is not initialized yet. 1256 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1257 exit(1) 1258 } 1259 1260 // Lock extra list, take head, unlock popped list. 1261 // nilokay=false is safe here because of the invariant above, 1262 // that the extra list always contains or will soon contain 1263 // at least one m. 1264 mp := lockextra(false) 1265 1266 // Set needextram when we've just emptied the list, 1267 // so that the eventual call into cgocallbackg will 1268 // allocate a new m for the extra list. We delay the 1269 // allocation until then so that it can be done 1270 // after exitsyscall makes sure it is okay to be 1271 // running at all (that is, there's no garbage collection 1272 // running right now). 1273 mp.needextram = mp.schedlink == 0 1274 unlockextra(mp.schedlink.ptr()) 1275 1276 // Save and block signals before installing g. 1277 // Once g is installed, any incoming signals will try to execute, 1278 // but we won't have the sigaltstack settings and other data 1279 // set up appropriately until the end of minit, which will 1280 // unblock the signals. This is the same dance as when 1281 // starting a new m to run Go code via newosproc. 1282 msigsave(mp) 1283 sigblock() 1284 1285 // Install g (= m->g0) and set the stack bounds 1286 // to match the current stack. We don't actually know 1287 // how big the stack is, like we don't know how big any 1288 // scheduling stack is, but we assume there's at least 32 kB, 1289 // which is more than enough for us. 1290 setg(mp.g0) 1291 _g_ := getg() 1292 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 1293 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 1294 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1295 1296 // Initialize this thread to use the m. 1297 asminit() 1298 minit() 1299 } 1300 1301 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1302 1303 // newextram allocates an m and puts it on the extra list. 1304 // It is called with a working local m, so that it can do things 1305 // like call schedlock and allocate. 1306 func newextram() { 1307 // Create extra goroutine locked to extra m. 1308 // The goroutine is the context in which the cgo callback will run. 1309 // The sched.pc will never be returned to, but setting it to 1310 // goexit makes clear to the traceback routines where 1311 // the goroutine stack ends. 1312 mp := allocm(nil, nil) 1313 gp := malg(4096) 1314 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1315 gp.sched.sp = gp.stack.hi 1316 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1317 gp.sched.lr = 0 1318 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1319 gp.syscallpc = gp.sched.pc 1320 gp.syscallsp = gp.sched.sp 1321 gp.stktopsp = gp.sched.sp 1322 // malg returns status as Gidle, change to Gsyscall before adding to allg 1323 // where GC will see it. 1324 casgstatus(gp, _Gidle, _Gsyscall) 1325 gp.m = mp 1326 mp.curg = gp 1327 mp.locked = _LockInternal 1328 mp.lockedg = gp 1329 gp.lockedm = mp 1330 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1331 if raceenabled { 1332 gp.racectx = racegostart(funcPC(newextram)) 1333 } 1334 // put on allg for garbage collector 1335 allgadd(gp) 1336 1337 // Add m to the extra list. 1338 mnext := lockextra(true) 1339 mp.schedlink.set(mnext) 1340 unlockextra(mp) 1341 } 1342 1343 // dropm is called when a cgo callback has called needm but is now 1344 // done with the callback and returning back into the non-Go thread. 1345 // It puts the current m back onto the extra list. 1346 // 1347 // The main expense here is the call to signalstack to release the 1348 // m's signal stack, and then the call to needm on the next callback 1349 // from this thread. It is tempting to try to save the m for next time, 1350 // which would eliminate both these costs, but there might not be 1351 // a next time: the current thread (which Go does not control) might exit. 1352 // If we saved the m for that thread, there would be an m leak each time 1353 // such a thread exited. Instead, we acquire and release an m on each 1354 // call. These should typically not be scheduling operations, just a few 1355 // atomics, so the cost should be small. 1356 // 1357 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1358 // variable using pthread_key_create. Unlike the pthread keys we already use 1359 // on OS X, this dummy key would never be read by Go code. It would exist 1360 // only so that we could register at thread-exit-time destructor. 1361 // That destructor would put the m back onto the extra list. 1362 // This is purely a performance optimization. The current version, 1363 // in which dropm happens on each cgo call, is still correct too. 1364 // We may have to keep the current version on systems with cgo 1365 // but without pthreads, like Windows. 1366 func dropm() { 1367 // Clear m and g, and return m to the extra list. 1368 // After the call to setg we can only call nosplit functions 1369 // with no pointer manipulation. 1370 mp := getg().m 1371 mnext := lockextra(true) 1372 mp.schedlink.set(mnext) 1373 1374 // Block signals before unminit. 1375 // Unminit unregisters the signal handling stack (but needs g on some systems). 1376 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1377 // It's important not to try to handle a signal between those two steps. 1378 sigblock() 1379 unminit() 1380 setg(nil) 1381 msigrestore(mp) 1382 1383 // Commit the release of mp. 1384 unlockextra(mp) 1385 } 1386 1387 var extram uintptr 1388 1389 // lockextra locks the extra list and returns the list head. 1390 // The caller must unlock the list by storing a new list head 1391 // to extram. If nilokay is true, then lockextra will 1392 // return a nil list head if that's what it finds. If nilokay is false, 1393 // lockextra will keep waiting until the list head is no longer nil. 1394 //go:nosplit 1395 func lockextra(nilokay bool) *m { 1396 const locked = 1 1397 1398 for { 1399 old := atomic.Loaduintptr(&extram) 1400 if old == locked { 1401 yield := osyield 1402 yield() 1403 continue 1404 } 1405 if old == 0 && !nilokay { 1406 usleep(1) 1407 continue 1408 } 1409 if atomic.Casuintptr(&extram, old, locked) { 1410 return (*m)(unsafe.Pointer(old)) 1411 } 1412 yield := osyield 1413 yield() 1414 continue 1415 } 1416 } 1417 1418 //go:nosplit 1419 func unlockextra(mp *m) { 1420 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1421 } 1422 1423 // Create a new m. It will start off with a call to fn, or else the scheduler. 1424 // fn needs to be static and not a heap allocated closure. 1425 // May run with m.p==nil, so write barriers are not allowed. 1426 //go:nowritebarrier 1427 func newm(fn func(), _p_ *p) { 1428 mp := allocm(_p_, fn) 1429 mp.nextp.set(_p_) 1430 msigsave(mp) 1431 if iscgo { 1432 var ts cgothreadstart 1433 if _cgo_thread_start == nil { 1434 throw("_cgo_thread_start missing") 1435 } 1436 ts.g.set(mp.g0) 1437 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1438 ts.fn = unsafe.Pointer(funcPC(mstart)) 1439 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1440 return 1441 } 1442 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi)) 1443 } 1444 1445 // Stops execution of the current m until new work is available. 1446 // Returns with acquired P. 1447 func stopm() { 1448 _g_ := getg() 1449 1450 if _g_.m.locks != 0 { 1451 throw("stopm holding locks") 1452 } 1453 if _g_.m.p != 0 { 1454 throw("stopm holding p") 1455 } 1456 if _g_.m.spinning { 1457 _g_.m.spinning = false 1458 atomic.Xadd(&sched.nmspinning, -1) 1459 } 1460 1461 retry: 1462 lock(&sched.lock) 1463 mput(_g_.m) 1464 unlock(&sched.lock) 1465 notesleep(&_g_.m.park) 1466 noteclear(&_g_.m.park) 1467 if _g_.m.helpgc != 0 { 1468 gchelper() 1469 _g_.m.helpgc = 0 1470 _g_.m.mcache = nil 1471 _g_.m.p = 0 1472 goto retry 1473 } 1474 acquirep(_g_.m.nextp.ptr()) 1475 _g_.m.nextp = 0 1476 } 1477 1478 func mspinning() { 1479 gp := getg() 1480 if !runqempty(gp.m.nextp.ptr()) { 1481 // Something (presumably the GC) was readied while the 1482 // runtime was starting up this M, so the M is no 1483 // longer spinning. 1484 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1485 throw("mspinning: nmspinning underflowed") 1486 } 1487 } else { 1488 gp.m.spinning = true 1489 } 1490 } 1491 1492 // Schedules some M to run the p (creates an M if necessary). 1493 // If p==nil, tries to get an idle P, if no idle P's does nothing. 1494 // May run with m.p==nil, so write barriers are not allowed. 1495 //go:nowritebarrier 1496 func startm(_p_ *p, spinning bool) { 1497 lock(&sched.lock) 1498 if _p_ == nil { 1499 _p_ = pidleget() 1500 if _p_ == nil { 1501 unlock(&sched.lock) 1502 if spinning { 1503 atomic.Xadd(&sched.nmspinning, -1) 1504 } 1505 return 1506 } 1507 } 1508 mp := mget() 1509 unlock(&sched.lock) 1510 if mp == nil { 1511 var fn func() 1512 if spinning { 1513 fn = mspinning 1514 } 1515 newm(fn, _p_) 1516 return 1517 } 1518 if mp.spinning { 1519 throw("startm: m is spinning") 1520 } 1521 if mp.nextp != 0 { 1522 throw("startm: m has p") 1523 } 1524 if spinning && !runqempty(_p_) { 1525 throw("startm: p has runnable gs") 1526 } 1527 mp.spinning = spinning 1528 mp.nextp.set(_p_) 1529 notewakeup(&mp.park) 1530 } 1531 1532 // Hands off P from syscall or locked M. 1533 // Always runs without a P, so write barriers are not allowed. 1534 //go:nowritebarrier 1535 func handoffp(_p_ *p) { 1536 // if it has local work, start it straight away 1537 if !runqempty(_p_) || sched.runqsize != 0 { 1538 startm(_p_, false) 1539 return 1540 } 1541 // no local work, check that there are no spinning/idle M's, 1542 // otherwise our help is not required 1543 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 1544 startm(_p_, true) 1545 return 1546 } 1547 lock(&sched.lock) 1548 if sched.gcwaiting != 0 { 1549 _p_.status = _Pgcstop 1550 sched.stopwait-- 1551 if sched.stopwait == 0 { 1552 notewakeup(&sched.stopnote) 1553 } 1554 unlock(&sched.lock) 1555 return 1556 } 1557 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 1558 sched.safePointFn(_p_) 1559 sched.safePointWait-- 1560 if sched.safePointWait == 0 { 1561 notewakeup(&sched.safePointNote) 1562 } 1563 } 1564 if sched.runqsize != 0 { 1565 unlock(&sched.lock) 1566 startm(_p_, false) 1567 return 1568 } 1569 // If this is the last running P and nobody is polling network, 1570 // need to wakeup another M to poll network. 1571 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 1572 unlock(&sched.lock) 1573 startm(_p_, false) 1574 return 1575 } 1576 pidleput(_p_) 1577 unlock(&sched.lock) 1578 } 1579 1580 // Tries to add one more P to execute G's. 1581 // Called when a G is made runnable (newproc, ready). 1582 func wakep() { 1583 // be conservative about spinning threads 1584 if !atomic.Cas(&sched.nmspinning, 0, 1) { 1585 return 1586 } 1587 startm(nil, true) 1588 } 1589 1590 // Stops execution of the current m that is locked to a g until the g is runnable again. 1591 // Returns with acquired P. 1592 func stoplockedm() { 1593 _g_ := getg() 1594 1595 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m { 1596 throw("stoplockedm: inconsistent locking") 1597 } 1598 if _g_.m.p != 0 { 1599 // Schedule another M to run this p. 1600 _p_ := releasep() 1601 handoffp(_p_) 1602 } 1603 incidlelocked(1) 1604 // Wait until another thread schedules lockedg again. 1605 notesleep(&_g_.m.park) 1606 noteclear(&_g_.m.park) 1607 status := readgstatus(_g_.m.lockedg) 1608 if status&^_Gscan != _Grunnable { 1609 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 1610 dumpgstatus(_g_) 1611 throw("stoplockedm: not runnable") 1612 } 1613 acquirep(_g_.m.nextp.ptr()) 1614 _g_.m.nextp = 0 1615 } 1616 1617 // Schedules the locked m to run the locked gp. 1618 // May run during STW, so write barriers are not allowed. 1619 //go:nowritebarrier 1620 func startlockedm(gp *g) { 1621 _g_ := getg() 1622 1623 mp := gp.lockedm 1624 if mp == _g_.m { 1625 throw("startlockedm: locked to me") 1626 } 1627 if mp.nextp != 0 { 1628 throw("startlockedm: m has p") 1629 } 1630 // directly handoff current P to the locked m 1631 incidlelocked(-1) 1632 _p_ := releasep() 1633 mp.nextp.set(_p_) 1634 notewakeup(&mp.park) 1635 stopm() 1636 } 1637 1638 // Stops the current m for stopTheWorld. 1639 // Returns when the world is restarted. 1640 func gcstopm() { 1641 _g_ := getg() 1642 1643 if sched.gcwaiting == 0 { 1644 throw("gcstopm: not waiting for gc") 1645 } 1646 if _g_.m.spinning { 1647 _g_.m.spinning = false 1648 atomic.Xadd(&sched.nmspinning, -1) 1649 } 1650 _p_ := releasep() 1651 lock(&sched.lock) 1652 _p_.status = _Pgcstop 1653 sched.stopwait-- 1654 if sched.stopwait == 0 { 1655 notewakeup(&sched.stopnote) 1656 } 1657 unlock(&sched.lock) 1658 stopm() 1659 } 1660 1661 // Schedules gp to run on the current M. 1662 // If inheritTime is true, gp inherits the remaining time in the 1663 // current time slice. Otherwise, it starts a new time slice. 1664 // Never returns. 1665 func execute(gp *g, inheritTime bool) { 1666 _g_ := getg() 1667 1668 casgstatus(gp, _Grunnable, _Grunning) 1669 gp.waitsince = 0 1670 gp.preempt = false 1671 gp.stackguard0 = gp.stack.lo + _StackGuard 1672 if !inheritTime { 1673 _g_.m.p.ptr().schedtick++ 1674 } 1675 _g_.m.curg = gp 1676 gp.m = _g_.m 1677 1678 // Check whether the profiler needs to be turned on or off. 1679 hz := sched.profilehz 1680 if _g_.m.profilehz != hz { 1681 resetcpuprofiler(hz) 1682 } 1683 1684 if trace.enabled { 1685 // GoSysExit has to happen when we have a P, but before GoStart. 1686 // So we emit it here. 1687 if gp.syscallsp != 0 && gp.sysblocktraced { 1688 // Since gp.sysblocktraced is true, we must emit an event. 1689 // There is a race between the code that initializes sysexitseq 1690 // and sysexitticks (in exitsyscall, which runs without a P, 1691 // and therefore is not stopped with the rest of the world) 1692 // and the code that initializes a new trace. 1693 // The recorded sysexitseq and sysexitticks must therefore 1694 // be treated as "best effort". If they are valid for this trace, 1695 // then great, use them for greater accuracy. 1696 // But if they're not valid for this trace, assume that the 1697 // trace was started after the actual syscall exit (but before 1698 // we actually managed to start the goroutine, aka right now), 1699 // and assign a fresh time stamp to keep the log consistent. 1700 seq, ts := gp.sysexitseq, gp.sysexitticks 1701 if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 { 1702 seq, ts = tracestamp() 1703 } 1704 traceGoSysExit(seq, ts) 1705 } 1706 traceGoStart() 1707 } 1708 1709 gogo(&gp.sched) 1710 } 1711 1712 // Finds a runnable goroutine to execute. 1713 // Tries to steal from other P's, get g from global queue, poll network. 1714 func findrunnable() (gp *g, inheritTime bool) { 1715 _g_ := getg() 1716 1717 top: 1718 if sched.gcwaiting != 0 { 1719 gcstopm() 1720 goto top 1721 } 1722 if _g_.m.p.ptr().runSafePointFn != 0 { 1723 runSafePointFn() 1724 } 1725 if fingwait && fingwake { 1726 if gp := wakefing(); gp != nil { 1727 ready(gp, 0) 1728 } 1729 } 1730 1731 // local runq 1732 if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil { 1733 return gp, inheritTime 1734 } 1735 1736 // global runq 1737 if sched.runqsize != 0 { 1738 lock(&sched.lock) 1739 gp := globrunqget(_g_.m.p.ptr(), 0) 1740 unlock(&sched.lock) 1741 if gp != nil { 1742 return gp, false 1743 } 1744 } 1745 1746 // Poll network. 1747 // This netpoll is only an optimization before we resort to stealing. 1748 // We can safely skip it if there a thread blocked in netpoll already. 1749 // If there is any kind of logical race with that blocked thread 1750 // (e.g. it has already returned from netpoll, but does not set lastpoll yet), 1751 // this thread will do blocking netpoll below anyway. 1752 if netpollinited() && sched.lastpoll != 0 { 1753 if gp := netpoll(false); gp != nil { // non-blocking 1754 // netpoll returns list of goroutines linked by schedlink. 1755 injectglist(gp.schedlink.ptr()) 1756 casgstatus(gp, _Gwaiting, _Grunnable) 1757 if trace.enabled { 1758 traceGoUnpark(gp, 0) 1759 } 1760 return gp, false 1761 } 1762 } 1763 1764 // If number of spinning M's >= number of busy P's, block. 1765 // This is necessary to prevent excessive CPU consumption 1766 // when GOMAXPROCS>>1 but the program parallelism is low. 1767 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic 1768 goto stop 1769 } 1770 if !_g_.m.spinning { 1771 _g_.m.spinning = true 1772 atomic.Xadd(&sched.nmspinning, 1) 1773 } 1774 // random steal from other P's 1775 for i := 0; i < int(4*gomaxprocs); i++ { 1776 if sched.gcwaiting != 0 { 1777 goto top 1778 } 1779 _p_ := allp[fastrand1()%uint32(gomaxprocs)] 1780 var gp *g 1781 if _p_ == _g_.m.p.ptr() { 1782 gp, _ = runqget(_p_) 1783 } else { 1784 stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g 1785 gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG) 1786 } 1787 if gp != nil { 1788 return gp, false 1789 } 1790 } 1791 1792 stop: 1793 1794 // We have nothing to do. If we're in the GC mark phase, can 1795 // safely scan and blacken objects, and have work to do, run 1796 // idle-time marking rather than give up the P. 1797 if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != nil && gcMarkWorkAvailable(_p_) { 1798 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 1799 gp := _p_.gcBgMarkWorker 1800 casgstatus(gp, _Gwaiting, _Grunnable) 1801 if trace.enabled { 1802 traceGoUnpark(gp, 0) 1803 } 1804 return gp, false 1805 } 1806 1807 // return P and block 1808 lock(&sched.lock) 1809 if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 { 1810 unlock(&sched.lock) 1811 goto top 1812 } 1813 if sched.runqsize != 0 { 1814 gp := globrunqget(_g_.m.p.ptr(), 0) 1815 unlock(&sched.lock) 1816 return gp, false 1817 } 1818 _p_ := releasep() 1819 pidleput(_p_) 1820 unlock(&sched.lock) 1821 if _g_.m.spinning { 1822 _g_.m.spinning = false 1823 atomic.Xadd(&sched.nmspinning, -1) 1824 } 1825 1826 // check all runqueues once again 1827 for i := 0; i < int(gomaxprocs); i++ { 1828 _p_ := allp[i] 1829 if _p_ != nil && !runqempty(_p_) { 1830 lock(&sched.lock) 1831 _p_ = pidleget() 1832 unlock(&sched.lock) 1833 if _p_ != nil { 1834 acquirep(_p_) 1835 goto top 1836 } 1837 break 1838 } 1839 } 1840 1841 // poll network 1842 if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 1843 if _g_.m.p != 0 { 1844 throw("findrunnable: netpoll with p") 1845 } 1846 if _g_.m.spinning { 1847 throw("findrunnable: netpoll with spinning") 1848 } 1849 gp := netpoll(true) // block until new work is available 1850 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 1851 if gp != nil { 1852 lock(&sched.lock) 1853 _p_ = pidleget() 1854 unlock(&sched.lock) 1855 if _p_ != nil { 1856 acquirep(_p_) 1857 injectglist(gp.schedlink.ptr()) 1858 casgstatus(gp, _Gwaiting, _Grunnable) 1859 if trace.enabled { 1860 traceGoUnpark(gp, 0) 1861 } 1862 return gp, false 1863 } 1864 injectglist(gp) 1865 } 1866 } 1867 stopm() 1868 goto top 1869 } 1870 1871 func resetspinning() { 1872 _g_ := getg() 1873 1874 var nmspinning uint32 1875 if _g_.m.spinning { 1876 _g_.m.spinning = false 1877 nmspinning = atomic.Xadd(&sched.nmspinning, -1) 1878 if int32(nmspinning) < 0 { 1879 throw("findrunnable: negative nmspinning") 1880 } 1881 } else { 1882 nmspinning = atomic.Load(&sched.nmspinning) 1883 } 1884 1885 // M wakeup policy is deliberately somewhat conservative (see nmspinning handling), 1886 // so see if we need to wakeup another P here. 1887 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 1888 wakep() 1889 } 1890 } 1891 1892 // Injects the list of runnable G's into the scheduler. 1893 // Can run concurrently with GC. 1894 func injectglist(glist *g) { 1895 if glist == nil { 1896 return 1897 } 1898 if trace.enabled { 1899 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 1900 traceGoUnpark(gp, 0) 1901 } 1902 } 1903 lock(&sched.lock) 1904 var n int 1905 for n = 0; glist != nil; n++ { 1906 gp := glist 1907 glist = gp.schedlink.ptr() 1908 casgstatus(gp, _Gwaiting, _Grunnable) 1909 globrunqput(gp) 1910 } 1911 unlock(&sched.lock) 1912 for ; n != 0 && sched.npidle != 0; n-- { 1913 startm(nil, false) 1914 } 1915 } 1916 1917 // One round of scheduler: find a runnable goroutine and execute it. 1918 // Never returns. 1919 func schedule() { 1920 _g_ := getg() 1921 1922 if _g_.m.locks != 0 { 1923 throw("schedule: holding locks") 1924 } 1925 1926 if _g_.m.lockedg != nil { 1927 stoplockedm() 1928 execute(_g_.m.lockedg, false) // Never returns. 1929 } 1930 1931 top: 1932 if sched.gcwaiting != 0 { 1933 gcstopm() 1934 goto top 1935 } 1936 if _g_.m.p.ptr().runSafePointFn != 0 { 1937 runSafePointFn() 1938 } 1939 1940 var gp *g 1941 var inheritTime bool 1942 if trace.enabled || trace.shutdown { 1943 gp = traceReader() 1944 if gp != nil { 1945 casgstatus(gp, _Gwaiting, _Grunnable) 1946 traceGoUnpark(gp, 0) 1947 resetspinning() 1948 } 1949 } 1950 if gp == nil && gcBlackenEnabled != 0 { 1951 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 1952 if gp != nil { 1953 resetspinning() 1954 } 1955 } 1956 if gp == nil { 1957 // Check the global runnable queue once in a while to ensure fairness. 1958 // Otherwise two goroutines can completely occupy the local runqueue 1959 // by constantly respawning each other. 1960 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 1961 lock(&sched.lock) 1962 gp = globrunqget(_g_.m.p.ptr(), 1) 1963 unlock(&sched.lock) 1964 if gp != nil { 1965 resetspinning() 1966 } 1967 } 1968 } 1969 if gp == nil { 1970 gp, inheritTime = runqget(_g_.m.p.ptr()) 1971 if gp != nil && _g_.m.spinning { 1972 throw("schedule: spinning with local work") 1973 } 1974 } 1975 if gp == nil { 1976 gp, inheritTime = findrunnable() // blocks until work is available 1977 resetspinning() 1978 } 1979 1980 if gp.lockedm != nil { 1981 // Hands off own p to the locked m, 1982 // then blocks waiting for a new p. 1983 startlockedm(gp) 1984 goto top 1985 } 1986 1987 execute(gp, inheritTime) 1988 } 1989 1990 // dropg removes the association between m and the current goroutine m->curg (gp for short). 1991 // Typically a caller sets gp's status away from Grunning and then 1992 // immediately calls dropg to finish the job. The caller is also responsible 1993 // for arranging that gp will be restarted using ready at an 1994 // appropriate time. After calling dropg and arranging for gp to be 1995 // readied later, the caller can do other work but eventually should 1996 // call schedule to restart the scheduling of goroutines on this m. 1997 func dropg() { 1998 _g_ := getg() 1999 2000 if _g_.m.lockedg == nil { 2001 _g_.m.curg.m = nil 2002 _g_.m.curg = nil 2003 } 2004 } 2005 2006 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2007 unlock((*mutex)(lock)) 2008 return true 2009 } 2010 2011 // park continuation on g0. 2012 func park_m(gp *g) { 2013 _g_ := getg() 2014 2015 if trace.enabled { 2016 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp) 2017 } 2018 2019 casgstatus(gp, _Grunning, _Gwaiting) 2020 dropg() 2021 2022 if _g_.m.waitunlockf != nil { 2023 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2024 ok := fn(gp, _g_.m.waitlock) 2025 _g_.m.waitunlockf = nil 2026 _g_.m.waitlock = nil 2027 if !ok { 2028 if trace.enabled { 2029 traceGoUnpark(gp, 2) 2030 } 2031 casgstatus(gp, _Gwaiting, _Grunnable) 2032 execute(gp, true) // Schedule it back, never returns. 2033 } 2034 } 2035 schedule() 2036 } 2037 2038 func goschedImpl(gp *g) { 2039 status := readgstatus(gp) 2040 if status&^_Gscan != _Grunning { 2041 dumpgstatus(gp) 2042 throw("bad g status") 2043 } 2044 casgstatus(gp, _Grunning, _Grunnable) 2045 dropg() 2046 lock(&sched.lock) 2047 globrunqput(gp) 2048 unlock(&sched.lock) 2049 2050 schedule() 2051 } 2052 2053 // Gosched continuation on g0. 2054 func gosched_m(gp *g) { 2055 if trace.enabled { 2056 traceGoSched() 2057 } 2058 goschedImpl(gp) 2059 } 2060 2061 func gopreempt_m(gp *g) { 2062 if trace.enabled { 2063 traceGoPreempt() 2064 } 2065 goschedImpl(gp) 2066 } 2067 2068 // Finishes execution of the current goroutine. 2069 func goexit1() { 2070 if raceenabled { 2071 racegoend() 2072 } 2073 if trace.enabled { 2074 traceGoEnd() 2075 } 2076 mcall(goexit0) 2077 } 2078 2079 // goexit continuation on g0. 2080 func goexit0(gp *g) { 2081 _g_ := getg() 2082 2083 casgstatus(gp, _Grunning, _Gdead) 2084 gp.m = nil 2085 gp.lockedm = nil 2086 _g_.m.lockedg = nil 2087 gp.paniconfault = false 2088 gp._defer = nil // should be true already but just in case. 2089 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2090 gp.writebuf = nil 2091 gp.waitreason = "" 2092 gp.param = nil 2093 2094 dropg() 2095 2096 if _g_.m.locked&^_LockExternal != 0 { 2097 print("invalid m->locked = ", _g_.m.locked, "\n") 2098 throw("internal lockOSThread error") 2099 } 2100 _g_.m.locked = 0 2101 gfput(_g_.m.p.ptr(), gp) 2102 schedule() 2103 } 2104 2105 //go:nosplit 2106 //go:nowritebarrier 2107 func save(pc, sp uintptr) { 2108 _g_ := getg() 2109 2110 _g_.sched.pc = pc 2111 _g_.sched.sp = sp 2112 _g_.sched.lr = 0 2113 _g_.sched.ret = 0 2114 _g_.sched.ctxt = nil 2115 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2116 } 2117 2118 // The goroutine g is about to enter a system call. 2119 // Record that it's not using the cpu anymore. 2120 // This is called only from the go syscall library and cgocall, 2121 // not from the low-level system calls used by the runtime. 2122 // 2123 // Entersyscall cannot split the stack: the gosave must 2124 // make g->sched refer to the caller's stack segment, because 2125 // entersyscall is going to return immediately after. 2126 // 2127 // Nothing entersyscall calls can split the stack either. 2128 // We cannot safely move the stack during an active call to syscall, 2129 // because we do not know which of the uintptr arguments are 2130 // really pointers (back into the stack). 2131 // In practice, this means that we make the fast path run through 2132 // entersyscall doing no-split things, and the slow path has to use systemstack 2133 // to run bigger things on the system stack. 2134 // 2135 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2136 // saved SP and PC are restored. This is needed when exitsyscall will be called 2137 // from a function further up in the call stack than the parent, as g->syscallsp 2138 // must always point to a valid stack frame. entersyscall below is the normal 2139 // entry point for syscalls, which obtains the SP and PC from the caller. 2140 // 2141 // Syscall tracing: 2142 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2143 // If the syscall does not block, that is it, we do not emit any other events. 2144 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2145 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2146 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2147 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2148 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2149 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2150 // and we wait for the increment before emitting traceGoSysExit. 2151 // Note that the increment is done even if tracing is not enabled, 2152 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2153 // 2154 //go:nosplit 2155 func reentersyscall(pc, sp uintptr) { 2156 _g_ := getg() 2157 2158 // Disable preemption because during this function g is in Gsyscall status, 2159 // but can have inconsistent g->sched, do not let GC observe it. 2160 _g_.m.locks++ 2161 2162 // Entersyscall must not call any function that might split/grow the stack. 2163 // (See details in comment above.) 2164 // Catch calls that might, by replacing the stack guard with something that 2165 // will trip any stack check and leaving a flag to tell newstack to die. 2166 _g_.stackguard0 = stackPreempt 2167 _g_.throwsplit = true 2168 2169 // Leave SP around for GC and traceback. 2170 save(pc, sp) 2171 _g_.syscallsp = sp 2172 _g_.syscallpc = pc 2173 casgstatus(_g_, _Grunning, _Gsyscall) 2174 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2175 systemstack(func() { 2176 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2177 throw("entersyscall") 2178 }) 2179 } 2180 2181 if trace.enabled { 2182 systemstack(traceGoSysCall) 2183 // systemstack itself clobbers g.sched.{pc,sp} and we might 2184 // need them later when the G is genuinely blocked in a 2185 // syscall 2186 save(pc, sp) 2187 } 2188 2189 if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic 2190 systemstack(entersyscall_sysmon) 2191 save(pc, sp) 2192 } 2193 2194 if _g_.m.p.ptr().runSafePointFn != 0 { 2195 // runSafePointFn may stack split if run on this stack 2196 systemstack(runSafePointFn) 2197 save(pc, sp) 2198 } 2199 2200 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2201 _g_.sysblocktraced = true 2202 _g_.m.mcache = nil 2203 _g_.m.p.ptr().m = 0 2204 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2205 if sched.gcwaiting != 0 { 2206 systemstack(entersyscall_gcwait) 2207 save(pc, sp) 2208 } 2209 2210 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2211 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2212 // Morestack detects this case and throws. 2213 _g_.stackguard0 = stackPreempt 2214 _g_.m.locks-- 2215 } 2216 2217 // Standard syscall entry used by the go syscall library and normal cgo calls. 2218 //go:nosplit 2219 func entersyscall(dummy int32) { 2220 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2221 } 2222 2223 func entersyscall_sysmon() { 2224 lock(&sched.lock) 2225 if atomic.Load(&sched.sysmonwait) != 0 { 2226 atomic.Store(&sched.sysmonwait, 0) 2227 notewakeup(&sched.sysmonnote) 2228 } 2229 unlock(&sched.lock) 2230 } 2231 2232 func entersyscall_gcwait() { 2233 _g_ := getg() 2234 _p_ := _g_.m.p.ptr() 2235 2236 lock(&sched.lock) 2237 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2238 if trace.enabled { 2239 traceGoSysBlock(_p_) 2240 traceProcStop(_p_) 2241 } 2242 _p_.syscalltick++ 2243 if sched.stopwait--; sched.stopwait == 0 { 2244 notewakeup(&sched.stopnote) 2245 } 2246 } 2247 unlock(&sched.lock) 2248 } 2249 2250 // The same as entersyscall(), but with a hint that the syscall is blocking. 2251 //go:nosplit 2252 func entersyscallblock(dummy int32) { 2253 _g_ := getg() 2254 2255 _g_.m.locks++ // see comment in entersyscall 2256 _g_.throwsplit = true 2257 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2258 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2259 _g_.sysblocktraced = true 2260 _g_.m.p.ptr().syscalltick++ 2261 2262 // Leave SP around for GC and traceback. 2263 pc := getcallerpc(unsafe.Pointer(&dummy)) 2264 sp := getcallersp(unsafe.Pointer(&dummy)) 2265 save(pc, sp) 2266 _g_.syscallsp = _g_.sched.sp 2267 _g_.syscallpc = _g_.sched.pc 2268 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2269 sp1 := sp 2270 sp2 := _g_.sched.sp 2271 sp3 := _g_.syscallsp 2272 systemstack(func() { 2273 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2274 throw("entersyscallblock") 2275 }) 2276 } 2277 casgstatus(_g_, _Grunning, _Gsyscall) 2278 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2279 systemstack(func() { 2280 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2281 throw("entersyscallblock") 2282 }) 2283 } 2284 2285 systemstack(entersyscallblock_handoff) 2286 2287 // Resave for traceback during blocked call. 2288 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2289 2290 _g_.m.locks-- 2291 } 2292 2293 func entersyscallblock_handoff() { 2294 if trace.enabled { 2295 traceGoSysCall() 2296 traceGoSysBlock(getg().m.p.ptr()) 2297 } 2298 handoffp(releasep()) 2299 } 2300 2301 // The goroutine g exited its system call. 2302 // Arrange for it to run on a cpu again. 2303 // This is called only from the go syscall library, not 2304 // from the low-level system calls used by the 2305 //go:nosplit 2306 func exitsyscall(dummy int32) { 2307 _g_ := getg() 2308 2309 _g_.m.locks++ // see comment in entersyscall 2310 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2311 throw("exitsyscall: syscall frame is no longer valid") 2312 } 2313 2314 _g_.waitsince = 0 2315 oldp := _g_.m.p.ptr() 2316 if exitsyscallfast() { 2317 if _g_.m.mcache == nil { 2318 throw("lost mcache") 2319 } 2320 if trace.enabled { 2321 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2322 systemstack(traceGoStart) 2323 } 2324 } 2325 // There's a cpu for us, so we can run. 2326 _g_.m.p.ptr().syscalltick++ 2327 // We need to cas the status and scan before resuming... 2328 casgstatus(_g_, _Gsyscall, _Grunning) 2329 2330 // Garbage collector isn't running (since we are), 2331 // so okay to clear syscallsp. 2332 _g_.syscallsp = 0 2333 _g_.m.locks-- 2334 if _g_.preempt { 2335 // restore the preemption request in case we've cleared it in newstack 2336 _g_.stackguard0 = stackPreempt 2337 } else { 2338 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2339 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2340 } 2341 _g_.throwsplit = false 2342 return 2343 } 2344 2345 _g_.sysexitticks = 0 2346 _g_.sysexitseq = 0 2347 if trace.enabled { 2348 // Wait till traceGoSysBlock event is emitted. 2349 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2350 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2351 osyield() 2352 } 2353 // We can't trace syscall exit right now because we don't have a P. 2354 // Tracing code can invoke write barriers that cannot run without a P. 2355 // So instead we remember the syscall exit time and emit the event 2356 // in execute when we have a P. 2357 _g_.sysexitseq, _g_.sysexitticks = tracestamp() 2358 } 2359 2360 _g_.m.locks-- 2361 2362 // Call the scheduler. 2363 mcall(exitsyscall0) 2364 2365 if _g_.m.mcache == nil { 2366 throw("lost mcache") 2367 } 2368 2369 // Scheduler returned, so we're allowed to run now. 2370 // Delete the syscallsp information that we left for 2371 // the garbage collector during the system call. 2372 // Must wait until now because until gosched returns 2373 // we don't know for sure that the garbage collector 2374 // is not running. 2375 _g_.syscallsp = 0 2376 _g_.m.p.ptr().syscalltick++ 2377 _g_.throwsplit = false 2378 } 2379 2380 //go:nosplit 2381 func exitsyscallfast() bool { 2382 _g_ := getg() 2383 2384 // Freezetheworld sets stopwait but does not retake P's. 2385 if sched.stopwait == freezeStopWait { 2386 _g_.m.mcache = nil 2387 _g_.m.p = 0 2388 return false 2389 } 2390 2391 // Try to re-acquire the last P. 2392 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2393 // There's a cpu for us, so we can run. 2394 _g_.m.mcache = _g_.m.p.ptr().mcache 2395 _g_.m.p.ptr().m.set(_g_.m) 2396 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2397 if trace.enabled { 2398 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2399 // traceGoSysBlock for this syscall was already emitted, 2400 // but here we effectively retake the p from the new syscall running on the same p. 2401 systemstack(func() { 2402 // Denote blocking of the new syscall. 2403 traceGoSysBlock(_g_.m.p.ptr()) 2404 // Denote completion of the current syscall. 2405 traceGoSysExit(tracestamp()) 2406 }) 2407 } 2408 _g_.m.p.ptr().syscalltick++ 2409 } 2410 return true 2411 } 2412 2413 // Try to get any other idle P. 2414 oldp := _g_.m.p.ptr() 2415 _g_.m.mcache = nil 2416 _g_.m.p = 0 2417 if sched.pidle != 0 { 2418 var ok bool 2419 systemstack(func() { 2420 ok = exitsyscallfast_pidle() 2421 if ok && trace.enabled { 2422 if oldp != nil { 2423 // Wait till traceGoSysBlock event is emitted. 2424 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2425 for oldp.syscalltick == _g_.m.syscalltick { 2426 osyield() 2427 } 2428 } 2429 traceGoSysExit(tracestamp()) 2430 } 2431 }) 2432 if ok { 2433 return true 2434 } 2435 } 2436 return false 2437 } 2438 2439 func exitsyscallfast_pidle() bool { 2440 lock(&sched.lock) 2441 _p_ := pidleget() 2442 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 2443 atomic.Store(&sched.sysmonwait, 0) 2444 notewakeup(&sched.sysmonnote) 2445 } 2446 unlock(&sched.lock) 2447 if _p_ != nil { 2448 acquirep(_p_) 2449 return true 2450 } 2451 return false 2452 } 2453 2454 // exitsyscall slow path on g0. 2455 // Failed to acquire P, enqueue gp as runnable. 2456 func exitsyscall0(gp *g) { 2457 _g_ := getg() 2458 2459 casgstatus(gp, _Gsyscall, _Grunnable) 2460 dropg() 2461 lock(&sched.lock) 2462 _p_ := pidleget() 2463 if _p_ == nil { 2464 globrunqput(gp) 2465 } else if atomic.Load(&sched.sysmonwait) != 0 { 2466 atomic.Store(&sched.sysmonwait, 0) 2467 notewakeup(&sched.sysmonnote) 2468 } 2469 unlock(&sched.lock) 2470 if _p_ != nil { 2471 acquirep(_p_) 2472 execute(gp, false) // Never returns. 2473 } 2474 if _g_.m.lockedg != nil { 2475 // Wait until another thread schedules gp and so m again. 2476 stoplockedm() 2477 execute(gp, false) // Never returns. 2478 } 2479 stopm() 2480 schedule() // Never returns. 2481 } 2482 2483 func beforefork() { 2484 gp := getg().m.curg 2485 2486 // Fork can hang if preempted with signals frequently enough (see issue 5517). 2487 // Ensure that we stay on the same M where we disable profiling. 2488 gp.m.locks++ 2489 if gp.m.profilehz != 0 { 2490 resetcpuprofiler(0) 2491 } 2492 2493 // This function is called before fork in syscall package. 2494 // Code between fork and exec must not allocate memory nor even try to grow stack. 2495 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2496 // runtime_AfterFork will undo this in parent process, but not in child. 2497 gp.stackguard0 = stackFork 2498 } 2499 2500 // Called from syscall package before fork. 2501 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2502 //go:nosplit 2503 func syscall_runtime_BeforeFork() { 2504 systemstack(beforefork) 2505 } 2506 2507 func afterfork() { 2508 gp := getg().m.curg 2509 2510 // See the comment in beforefork. 2511 gp.stackguard0 = gp.stack.lo + _StackGuard 2512 2513 hz := sched.profilehz 2514 if hz != 0 { 2515 resetcpuprofiler(hz) 2516 } 2517 gp.m.locks-- 2518 } 2519 2520 // Called from syscall package after fork in parent. 2521 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2522 //go:nosplit 2523 func syscall_runtime_AfterFork() { 2524 systemstack(afterfork) 2525 } 2526 2527 // Allocate a new g, with a stack big enough for stacksize bytes. 2528 func malg(stacksize int32) *g { 2529 newg := new(g) 2530 if stacksize >= 0 { 2531 stacksize = round2(_StackSystem + stacksize) 2532 systemstack(func() { 2533 newg.stack, newg.stkbar = stackalloc(uint32(stacksize)) 2534 }) 2535 newg.stackguard0 = newg.stack.lo + _StackGuard 2536 newg.stackguard1 = ^uintptr(0) 2537 newg.stackAlloc = uintptr(stacksize) 2538 } 2539 return newg 2540 } 2541 2542 // Create a new g running fn with siz bytes of arguments. 2543 // Put it on the queue of g's waiting to run. 2544 // The compiler turns a go statement into a call to this. 2545 // Cannot split the stack because it assumes that the arguments 2546 // are available sequentially after &fn; they would not be 2547 // copied if a stack split occurred. 2548 //go:nosplit 2549 func newproc(siz int32, fn *funcval) { 2550 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2551 pc := getcallerpc(unsafe.Pointer(&siz)) 2552 systemstack(func() { 2553 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2554 }) 2555 } 2556 2557 // Create a new g running fn with narg bytes of arguments starting 2558 // at argp and returning nret bytes of results. callerpc is the 2559 // address of the go statement that created this. The new g is put 2560 // on the queue of g's waiting to run. 2561 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2562 _g_ := getg() 2563 2564 if fn == nil { 2565 _g_.m.throwing = -1 // do not dump full stacks 2566 throw("go of nil func value") 2567 } 2568 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2569 siz := narg + nret 2570 siz = (siz + 7) &^ 7 2571 2572 // We could allocate a larger initial stack if necessary. 2573 // Not worth it: this is almost always an error. 2574 // 4*sizeof(uintreg): extra space added below 2575 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2576 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2577 throw("newproc: function arguments too large for new goroutine") 2578 } 2579 2580 _p_ := _g_.m.p.ptr() 2581 newg := gfget(_p_) 2582 if newg == nil { 2583 newg = malg(_StackMin) 2584 casgstatus(newg, _Gidle, _Gdead) 2585 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2586 } 2587 if newg.stack.hi == 0 { 2588 throw("newproc1: newg missing stack") 2589 } 2590 2591 if readgstatus(newg) != _Gdead { 2592 throw("newproc1: new g is not Gdead") 2593 } 2594 2595 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2596 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2597 sp := newg.stack.hi - totalSize 2598 spArg := sp 2599 if usesLR { 2600 // caller's LR 2601 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil 2602 prepGoExitFrame(sp) 2603 spArg += sys.MinFrameSize 2604 } 2605 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2606 2607 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2608 newg.sched.sp = sp 2609 newg.stktopsp = sp 2610 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 2611 newg.sched.g = guintptr(unsafe.Pointer(newg)) 2612 gostartcallfn(&newg.sched, fn) 2613 newg.gopc = callerpc 2614 newg.startpc = fn.fn 2615 casgstatus(newg, _Gdead, _Grunnable) 2616 2617 if _p_.goidcache == _p_.goidcacheend { 2618 // Sched.goidgen is the last allocated id, 2619 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 2620 // At startup sched.goidgen=0, so main goroutine receives goid=1. 2621 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 2622 _p_.goidcache -= _GoidCacheBatch - 1 2623 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 2624 } 2625 newg.goid = int64(_p_.goidcache) 2626 _p_.goidcache++ 2627 if raceenabled { 2628 newg.racectx = racegostart(callerpc) 2629 } 2630 if trace.enabled { 2631 traceGoCreate(newg, newg.startpc) 2632 } 2633 runqput(_p_, newg, true) 2634 2635 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic 2636 wakep() 2637 } 2638 _g_.m.locks-- 2639 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 2640 _g_.stackguard0 = stackPreempt 2641 } 2642 return newg 2643 } 2644 2645 // Put on gfree list. 2646 // If local list is too long, transfer a batch to the global list. 2647 func gfput(_p_ *p, gp *g) { 2648 if readgstatus(gp) != _Gdead { 2649 throw("gfput: bad status (not Gdead)") 2650 } 2651 2652 stksize := gp.stackAlloc 2653 2654 if stksize != _FixedStack { 2655 // non-standard stack size - free it. 2656 stackfree(gp.stack, gp.stackAlloc) 2657 gp.stack.lo = 0 2658 gp.stack.hi = 0 2659 gp.stackguard0 = 0 2660 gp.stkbar = nil 2661 gp.stkbarPos = 0 2662 } else { 2663 // Reset stack barriers. 2664 gp.stkbar = gp.stkbar[:0] 2665 gp.stkbarPos = 0 2666 } 2667 2668 gp.schedlink.set(_p_.gfree) 2669 _p_.gfree = gp 2670 _p_.gfreecnt++ 2671 if _p_.gfreecnt >= 64 { 2672 lock(&sched.gflock) 2673 for _p_.gfreecnt >= 32 { 2674 _p_.gfreecnt-- 2675 gp = _p_.gfree 2676 _p_.gfree = gp.schedlink.ptr() 2677 gp.schedlink.set(sched.gfree) 2678 sched.gfree = gp 2679 sched.ngfree++ 2680 } 2681 unlock(&sched.gflock) 2682 } 2683 } 2684 2685 // Get from gfree list. 2686 // If local list is empty, grab a batch from global list. 2687 func gfget(_p_ *p) *g { 2688 retry: 2689 gp := _p_.gfree 2690 if gp == nil && sched.gfree != nil { 2691 lock(&sched.gflock) 2692 for _p_.gfreecnt < 32 && sched.gfree != nil { 2693 _p_.gfreecnt++ 2694 gp = sched.gfree 2695 sched.gfree = gp.schedlink.ptr() 2696 sched.ngfree-- 2697 gp.schedlink.set(_p_.gfree) 2698 _p_.gfree = gp 2699 } 2700 unlock(&sched.gflock) 2701 goto retry 2702 } 2703 if gp != nil { 2704 _p_.gfree = gp.schedlink.ptr() 2705 _p_.gfreecnt-- 2706 if gp.stack.lo == 0 { 2707 // Stack was deallocated in gfput. Allocate a new one. 2708 systemstack(func() { 2709 gp.stack, gp.stkbar = stackalloc(_FixedStack) 2710 }) 2711 gp.stackguard0 = gp.stack.lo + _StackGuard 2712 gp.stackAlloc = _FixedStack 2713 } else { 2714 if raceenabled { 2715 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2716 } 2717 if msanenabled { 2718 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2719 } 2720 } 2721 } 2722 return gp 2723 } 2724 2725 // Purge all cached G's from gfree list to the global list. 2726 func gfpurge(_p_ *p) { 2727 lock(&sched.gflock) 2728 for _p_.gfreecnt != 0 { 2729 _p_.gfreecnt-- 2730 gp := _p_.gfree 2731 _p_.gfree = gp.schedlink.ptr() 2732 gp.schedlink.set(sched.gfree) 2733 sched.gfree = gp 2734 sched.ngfree++ 2735 } 2736 unlock(&sched.gflock) 2737 } 2738 2739 // Breakpoint executes a breakpoint trap. 2740 func Breakpoint() { 2741 breakpoint() 2742 } 2743 2744 // dolockOSThread is called by LockOSThread and lockOSThread below 2745 // after they modify m.locked. Do not allow preemption during this call, 2746 // or else the m might be different in this function than in the caller. 2747 //go:nosplit 2748 func dolockOSThread() { 2749 _g_ := getg() 2750 _g_.m.lockedg = _g_ 2751 _g_.lockedm = _g_.m 2752 } 2753 2754 //go:nosplit 2755 2756 // LockOSThread wires the calling goroutine to its current operating system thread. 2757 // Until the calling goroutine exits or calls UnlockOSThread, it will always 2758 // execute in that thread, and no other goroutine can. 2759 func LockOSThread() { 2760 getg().m.locked |= _LockExternal 2761 dolockOSThread() 2762 } 2763 2764 //go:nosplit 2765 func lockOSThread() { 2766 getg().m.locked += _LockInternal 2767 dolockOSThread() 2768 } 2769 2770 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 2771 // after they update m->locked. Do not allow preemption during this call, 2772 // or else the m might be in different in this function than in the caller. 2773 //go:nosplit 2774 func dounlockOSThread() { 2775 _g_ := getg() 2776 if _g_.m.locked != 0 { 2777 return 2778 } 2779 _g_.m.lockedg = nil 2780 _g_.lockedm = nil 2781 } 2782 2783 //go:nosplit 2784 2785 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 2786 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 2787 func UnlockOSThread() { 2788 getg().m.locked &^= _LockExternal 2789 dounlockOSThread() 2790 } 2791 2792 //go:nosplit 2793 func unlockOSThread() { 2794 _g_ := getg() 2795 if _g_.m.locked < _LockInternal { 2796 systemstack(badunlockosthread) 2797 } 2798 _g_.m.locked -= _LockInternal 2799 dounlockOSThread() 2800 } 2801 2802 func badunlockosthread() { 2803 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 2804 } 2805 2806 func gcount() int32 { 2807 n := int32(allglen) - sched.ngfree 2808 for i := 0; ; i++ { 2809 _p_ := allp[i] 2810 if _p_ == nil { 2811 break 2812 } 2813 n -= _p_.gfreecnt 2814 } 2815 2816 // All these variables can be changed concurrently, so the result can be inconsistent. 2817 // But at least the current goroutine is running. 2818 if n < 1 { 2819 n = 1 2820 } 2821 return n 2822 } 2823 2824 func mcount() int32 { 2825 return sched.mcount 2826 } 2827 2828 var prof struct { 2829 lock uint32 2830 hz int32 2831 } 2832 2833 func _System() { _System() } 2834 func _ExternalCode() { _ExternalCode() } 2835 func _GC() { _GC() } 2836 2837 // Called if we receive a SIGPROF signal. 2838 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 2839 if prof.hz == 0 { 2840 return 2841 } 2842 2843 // Profiling runs concurrently with GC, so it must not allocate. 2844 mp.mallocing++ 2845 2846 // Define that a "user g" is a user-created goroutine, and a "system g" 2847 // is one that is m->g0 or m->gsignal. 2848 // 2849 // We might be interrupted for profiling halfway through a 2850 // goroutine switch. The switch involves updating three (or four) values: 2851 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 2852 // because once it gets updated the new g is running. 2853 // 2854 // When switching from a user g to a system g, LR is not considered live, 2855 // so the update only affects g, SP, and PC. Since PC must be last, there 2856 // the possible partial transitions in ordinary execution are (1) g alone is updated, 2857 // (2) both g and SP are updated, and (3) SP alone is updated. 2858 // If SP or g alone is updated, we can detect the partial transition by checking 2859 // whether the SP is within g's stack bounds. (We could also require that SP 2860 // be changed only after g, but the stack bounds check is needed by other 2861 // cases, so there is no need to impose an additional requirement.) 2862 // 2863 // There is one exceptional transition to a system g, not in ordinary execution. 2864 // When a signal arrives, the operating system starts the signal handler running 2865 // with an updated PC and SP. The g is updated last, at the beginning of the 2866 // handler. There are two reasons this is okay. First, until g is updated the 2867 // g and SP do not match, so the stack bounds check detects the partial transition. 2868 // Second, signal handlers currently run with signals disabled, so a profiling 2869 // signal cannot arrive during the handler. 2870 // 2871 // When switching from a system g to a user g, there are three possibilities. 2872 // 2873 // First, it may be that the g switch has no PC update, because the SP 2874 // either corresponds to a user g throughout (as in asmcgocall) 2875 // or because it has been arranged to look like a user g frame 2876 // (as in cgocallback_gofunc). In this case, since the entire 2877 // transition is a g+SP update, a partial transition updating just one of 2878 // those will be detected by the stack bounds check. 2879 // 2880 // Second, when returning from a signal handler, the PC and SP updates 2881 // are performed by the operating system in an atomic update, so the g 2882 // update must be done before them. The stack bounds check detects 2883 // the partial transition here, and (again) signal handlers run with signals 2884 // disabled, so a profiling signal cannot arrive then anyway. 2885 // 2886 // Third, the common case: it may be that the switch updates g, SP, and PC 2887 // separately. If the PC is within any of the functions that does this, 2888 // we don't ask for a traceback. C.F. the function setsSP for more about this. 2889 // 2890 // There is another apparently viable approach, recorded here in case 2891 // the "PC within setsSP function" check turns out not to be usable. 2892 // It would be possible to delay the update of either g or SP until immediately 2893 // before the PC update instruction. Then, because of the stack bounds check, 2894 // the only problematic interrupt point is just before that PC update instruction, 2895 // and the sigprof handler can detect that instruction and simulate stepping past 2896 // it in order to reach a consistent state. On ARM, the update of g must be made 2897 // in two places (in R10 and also in a TLS slot), so the delayed update would 2898 // need to be the SP update. The sigprof handler must read the instruction at 2899 // the current PC and if it was the known instruction (for example, JMP BX or 2900 // MOV R2, PC), use that other register in place of the PC value. 2901 // The biggest drawback to this solution is that it requires that we can tell 2902 // whether it's safe to read from the memory pointed at by PC. 2903 // In a correct program, we can test PC == nil and otherwise read, 2904 // but if a profiling signal happens at the instant that a program executes 2905 // a bad jump (before the program manages to handle the resulting fault) 2906 // the profiling handler could fault trying to read nonexistent memory. 2907 // 2908 // To recap, there are no constraints on the assembly being used for the 2909 // transition. We simply require that g and SP match and that the PC is not 2910 // in gogo. 2911 traceback := true 2912 haveStackLock := false 2913 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 2914 traceback = false 2915 } else if gp.m.curg != nil { 2916 if gcTryLockStackBarriers(gp.m.curg) { 2917 haveStackLock = true 2918 } else { 2919 // Stack barriers are being inserted or 2920 // removed, so we can't get a consistent 2921 // traceback right now. 2922 traceback = false 2923 } 2924 } 2925 var stk [maxCPUProfStack]uintptr 2926 n := 0 2927 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 2928 // Cgo, we can't unwind and symbolize arbitrary C code, 2929 // so instead collect Go stack that leads to the cgo call. 2930 // This is especially important on windows, since all syscalls are cgo calls. 2931 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0) 2932 } else if traceback { 2933 flags := uint(_TraceTrap | _TraceJumpStack) 2934 if gp.m.curg != nil && readgstatus(gp.m.curg) == _Gcopystack { 2935 // We can traceback the system stack, but 2936 // don't jump to the potentially inconsistent 2937 // user stack. 2938 flags &^= _TraceJumpStack 2939 } 2940 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags) 2941 } 2942 if !traceback || n <= 0 { 2943 // Normal traceback is impossible or has failed. 2944 // See if it falls into several common cases. 2945 n = 0 2946 if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 2947 // Libcall, i.e. runtime syscall on windows. 2948 // Collect Go stack that leads to the call. 2949 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 2950 } 2951 if n == 0 { 2952 // If all of the above has failed, account it against abstract "System" or "GC". 2953 n = 2 2954 // "ExternalCode" is better than "etext". 2955 if pc > firstmoduledata.etext { 2956 pc = funcPC(_ExternalCode) + sys.PCQuantum 2957 } 2958 stk[0] = pc 2959 if mp.preemptoff != "" || mp.helpgc != 0 { 2960 stk[1] = funcPC(_GC) + sys.PCQuantum 2961 } else { 2962 stk[1] = funcPC(_System) + sys.PCQuantum 2963 } 2964 } 2965 } 2966 if haveStackLock { 2967 gcUnlockStackBarriers(gp.m.curg) 2968 } 2969 2970 if prof.hz != 0 { 2971 // Simple cas-lock to coordinate with setcpuprofilerate. 2972 for !atomic.Cas(&prof.lock, 0, 1) { 2973 osyield() 2974 } 2975 if prof.hz != 0 { 2976 cpuprof.add(stk[:n]) 2977 } 2978 atomic.Store(&prof.lock, 0) 2979 } 2980 mp.mallocing-- 2981 } 2982 2983 // Reports whether a function will set the SP 2984 // to an absolute value. Important that 2985 // we don't traceback when these are at the bottom 2986 // of the stack since we can't be sure that we will 2987 // find the caller. 2988 // 2989 // If the function is not on the bottom of the stack 2990 // we assume that it will have set it up so that traceback will be consistent, 2991 // either by being a traceback terminating function 2992 // or putting one on the stack at the right offset. 2993 func setsSP(pc uintptr) bool { 2994 f := findfunc(pc) 2995 if f == nil { 2996 // couldn't find the function for this PC, 2997 // so assume the worst and stop traceback 2998 return true 2999 } 3000 switch f.entry { 3001 case gogoPC, systemstackPC, mcallPC, morestackPC: 3002 return true 3003 } 3004 return false 3005 } 3006 3007 // Arrange to call fn with a traceback hz times a second. 3008 func setcpuprofilerate_m(hz int32) { 3009 // Force sane arguments. 3010 if hz < 0 { 3011 hz = 0 3012 } 3013 3014 // Disable preemption, otherwise we can be rescheduled to another thread 3015 // that has profiling enabled. 3016 _g_ := getg() 3017 _g_.m.locks++ 3018 3019 // Stop profiler on this thread so that it is safe to lock prof. 3020 // if a profiling signal came in while we had prof locked, 3021 // it would deadlock. 3022 resetcpuprofiler(0) 3023 3024 for !atomic.Cas(&prof.lock, 0, 1) { 3025 osyield() 3026 } 3027 prof.hz = hz 3028 atomic.Store(&prof.lock, 0) 3029 3030 lock(&sched.lock) 3031 sched.profilehz = hz 3032 unlock(&sched.lock) 3033 3034 if hz != 0 { 3035 resetcpuprofiler(hz) 3036 } 3037 3038 _g_.m.locks-- 3039 } 3040 3041 // Change number of processors. The world is stopped, sched is locked. 3042 // gcworkbufs are not being modified by either the GC or 3043 // the write barrier code. 3044 // Returns list of Ps with local work, they need to be scheduled by the caller. 3045 func procresize(nprocs int32) *p { 3046 old := gomaxprocs 3047 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3048 throw("procresize: invalid arg") 3049 } 3050 if trace.enabled { 3051 traceGomaxprocs(nprocs) 3052 } 3053 3054 // update statistics 3055 now := nanotime() 3056 if sched.procresizetime != 0 { 3057 sched.totaltime += int64(old) * (now - sched.procresizetime) 3058 } 3059 sched.procresizetime = now 3060 3061 // initialize new P's 3062 for i := int32(0); i < nprocs; i++ { 3063 pp := allp[i] 3064 if pp == nil { 3065 pp = new(p) 3066 pp.id = i 3067 pp.status = _Pgcstop 3068 pp.sudogcache = pp.sudogbuf[:0] 3069 for i := range pp.deferpool { 3070 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3071 } 3072 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3073 } 3074 if pp.mcache == nil { 3075 if old == 0 && i == 0 { 3076 if getg().m.mcache == nil { 3077 throw("missing mcache?") 3078 } 3079 pp.mcache = getg().m.mcache // bootstrap 3080 } else { 3081 pp.mcache = allocmcache() 3082 } 3083 } 3084 } 3085 3086 // free unused P's 3087 for i := nprocs; i < old; i++ { 3088 p := allp[i] 3089 if trace.enabled { 3090 if p == getg().m.p.ptr() { 3091 // moving to p[0], pretend that we were descheduled 3092 // and then scheduled again to keep the trace sane. 3093 traceGoSched() 3094 traceProcStop(p) 3095 } 3096 } 3097 // move all runnable goroutines to the global queue 3098 for p.runqhead != p.runqtail { 3099 // pop from tail of local queue 3100 p.runqtail-- 3101 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3102 // push onto head of global queue 3103 globrunqputhead(gp) 3104 } 3105 if p.runnext != 0 { 3106 globrunqputhead(p.runnext.ptr()) 3107 p.runnext = 0 3108 } 3109 // if there's a background worker, make it runnable and put 3110 // it on the global queue so it can clean itself up 3111 if p.gcBgMarkWorker != nil { 3112 casgstatus(p.gcBgMarkWorker, _Gwaiting, _Grunnable) 3113 if trace.enabled { 3114 traceGoUnpark(p.gcBgMarkWorker, 0) 3115 } 3116 globrunqput(p.gcBgMarkWorker) 3117 p.gcBgMarkWorker = nil 3118 } 3119 for i := range p.sudogbuf { 3120 p.sudogbuf[i] = nil 3121 } 3122 p.sudogcache = p.sudogbuf[:0] 3123 for i := range p.deferpool { 3124 for j := range p.deferpoolbuf[i] { 3125 p.deferpoolbuf[i][j] = nil 3126 } 3127 p.deferpool[i] = p.deferpoolbuf[i][:0] 3128 } 3129 freemcache(p.mcache) 3130 p.mcache = nil 3131 gfpurge(p) 3132 traceProcFree(p) 3133 p.status = _Pdead 3134 // can't free P itself because it can be referenced by an M in syscall 3135 } 3136 3137 _g_ := getg() 3138 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3139 // continue to use the current P 3140 _g_.m.p.ptr().status = _Prunning 3141 } else { 3142 // release the current P and acquire allp[0] 3143 if _g_.m.p != 0 { 3144 _g_.m.p.ptr().m = 0 3145 } 3146 _g_.m.p = 0 3147 _g_.m.mcache = nil 3148 p := allp[0] 3149 p.m = 0 3150 p.status = _Pidle 3151 acquirep(p) 3152 if trace.enabled { 3153 traceGoStart() 3154 } 3155 } 3156 var runnablePs *p 3157 for i := nprocs - 1; i >= 0; i-- { 3158 p := allp[i] 3159 if _g_.m.p.ptr() == p { 3160 continue 3161 } 3162 p.status = _Pidle 3163 if runqempty(p) { 3164 pidleput(p) 3165 } else { 3166 p.m.set(mget()) 3167 p.link.set(runnablePs) 3168 runnablePs = p 3169 } 3170 } 3171 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3172 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3173 return runnablePs 3174 } 3175 3176 // Associate p and the current m. 3177 func acquirep(_p_ *p) { 3178 acquirep1(_p_) 3179 3180 // have p; write barriers now allowed 3181 _g_ := getg() 3182 _g_.m.mcache = _p_.mcache 3183 3184 if trace.enabled { 3185 traceProcStart() 3186 } 3187 } 3188 3189 // May run during STW, so write barriers are not allowed. 3190 //go:nowritebarrier 3191 func acquirep1(_p_ *p) { 3192 _g_ := getg() 3193 3194 if _g_.m.p != 0 || _g_.m.mcache != nil { 3195 throw("acquirep: already in go") 3196 } 3197 if _p_.m != 0 || _p_.status != _Pidle { 3198 id := int32(0) 3199 if _p_.m != 0 { 3200 id = _p_.m.ptr().id 3201 } 3202 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3203 throw("acquirep: invalid p state") 3204 } 3205 _g_.m.p.set(_p_) 3206 _p_.m.set(_g_.m) 3207 _p_.status = _Prunning 3208 } 3209 3210 // Disassociate p and the current m. 3211 func releasep() *p { 3212 _g_ := getg() 3213 3214 if _g_.m.p == 0 || _g_.m.mcache == nil { 3215 throw("releasep: invalid arg") 3216 } 3217 _p_ := _g_.m.p.ptr() 3218 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 3219 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 3220 throw("releasep: invalid p state") 3221 } 3222 if trace.enabled { 3223 traceProcStop(_g_.m.p.ptr()) 3224 } 3225 _g_.m.p = 0 3226 _g_.m.mcache = nil 3227 _p_.m = 0 3228 _p_.status = _Pidle 3229 return _p_ 3230 } 3231 3232 func incidlelocked(v int32) { 3233 lock(&sched.lock) 3234 sched.nmidlelocked += v 3235 if v > 0 { 3236 checkdead() 3237 } 3238 unlock(&sched.lock) 3239 } 3240 3241 // Check for deadlock situation. 3242 // The check is based on number of running M's, if 0 -> deadlock. 3243 func checkdead() { 3244 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 3245 // there are no running goroutines. The calling program is 3246 // assumed to be running. 3247 if islibrary || isarchive { 3248 return 3249 } 3250 3251 // If we are dying because of a signal caught on an already idle thread, 3252 // freezetheworld will cause all running threads to block. 3253 // And runtime will essentially enter into deadlock state, 3254 // except that there is a thread that will call exit soon. 3255 if panicking > 0 { 3256 return 3257 } 3258 3259 // -1 for sysmon 3260 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 3261 if run > 0 { 3262 return 3263 } 3264 if run < 0 { 3265 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 3266 throw("checkdead: inconsistent counts") 3267 } 3268 3269 grunning := 0 3270 lock(&allglock) 3271 for i := 0; i < len(allgs); i++ { 3272 gp := allgs[i] 3273 if isSystemGoroutine(gp) { 3274 continue 3275 } 3276 s := readgstatus(gp) 3277 switch s &^ _Gscan { 3278 case _Gwaiting: 3279 grunning++ 3280 case _Grunnable, 3281 _Grunning, 3282 _Gsyscall: 3283 unlock(&allglock) 3284 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 3285 throw("checkdead: runnable g") 3286 } 3287 } 3288 unlock(&allglock) 3289 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 3290 throw("no goroutines (main called runtime.Goexit) - deadlock!") 3291 } 3292 3293 // Maybe jump time forward for playground. 3294 gp := timejump() 3295 if gp != nil { 3296 casgstatus(gp, _Gwaiting, _Grunnable) 3297 globrunqput(gp) 3298 _p_ := pidleget() 3299 if _p_ == nil { 3300 throw("checkdead: no p for timer") 3301 } 3302 mp := mget() 3303 if mp == nil { 3304 // There should always be a free M since 3305 // nothing is running. 3306 throw("checkdead: no m for timer") 3307 } 3308 mp.nextp.set(_p_) 3309 notewakeup(&mp.park) 3310 return 3311 } 3312 3313 getg().m.throwing = -1 // do not dump full stacks 3314 throw("all goroutines are asleep - deadlock!") 3315 } 3316 3317 // forcegcperiod is the maximum time in nanoseconds between garbage 3318 // collections. If we go this long without a garbage collection, one 3319 // is forced to run. 3320 // 3321 // This is a variable for testing purposes. It normally doesn't change. 3322 var forcegcperiod int64 = 2 * 60 * 1e9 3323 3324 // Always runs without a P, so write barriers are not allowed. 3325 // 3326 //go:nowritebarrierrec 3327 func sysmon() { 3328 // If a heap span goes unused for 5 minutes after a garbage collection, 3329 // we hand it back to the operating system. 3330 scavengelimit := int64(5 * 60 * 1e9) 3331 3332 if debug.scavenge > 0 { 3333 // Scavenge-a-lot for testing. 3334 forcegcperiod = 10 * 1e6 3335 scavengelimit = 20 * 1e6 3336 } 3337 3338 lastscavenge := nanotime() 3339 nscavenge := 0 3340 3341 lasttrace := int64(0) 3342 idle := 0 // how many cycles in succession we had not wokeup somebody 3343 delay := uint32(0) 3344 for { 3345 if idle == 0 { // start with 20us sleep... 3346 delay = 20 3347 } else if idle > 50 { // start doubling the sleep after 1ms... 3348 delay *= 2 3349 } 3350 if delay > 10*1000 { // up to 10ms 3351 delay = 10 * 1000 3352 } 3353 usleep(delay) 3354 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic 3355 lock(&sched.lock) 3356 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 3357 atomic.Store(&sched.sysmonwait, 1) 3358 unlock(&sched.lock) 3359 // Make wake-up period small enough 3360 // for the sampling to be correct. 3361 maxsleep := forcegcperiod / 2 3362 if scavengelimit < forcegcperiod { 3363 maxsleep = scavengelimit / 2 3364 } 3365 notetsleep(&sched.sysmonnote, maxsleep) 3366 lock(&sched.lock) 3367 atomic.Store(&sched.sysmonwait, 0) 3368 noteclear(&sched.sysmonnote) 3369 idle = 0 3370 delay = 20 3371 } 3372 unlock(&sched.lock) 3373 } 3374 // poll network if not polled for more than 10ms 3375 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 3376 now := nanotime() 3377 unixnow := unixnanotime() 3378 if lastpoll != 0 && lastpoll+10*1000*1000 < now { 3379 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 3380 gp := netpoll(false) // non-blocking - returns list of goroutines 3381 if gp != nil { 3382 // Need to decrement number of idle locked M's 3383 // (pretending that one more is running) before injectglist. 3384 // Otherwise it can lead to the following situation: 3385 // injectglist grabs all P's but before it starts M's to run the P's, 3386 // another M returns from syscall, finishes running its G, 3387 // observes that there is no work to do and no other running M's 3388 // and reports deadlock. 3389 incidlelocked(-1) 3390 injectglist(gp) 3391 incidlelocked(1) 3392 } 3393 } 3394 // retake P's blocked in syscalls 3395 // and preempt long running G's 3396 if retake(now) != 0 { 3397 idle = 0 3398 } else { 3399 idle++ 3400 } 3401 // check if we need to force a GC 3402 lastgc := int64(atomic.Load64(&memstats.last_gc)) 3403 if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 { 3404 lock(&forcegc.lock) 3405 forcegc.idle = 0 3406 forcegc.g.schedlink = 0 3407 injectglist(forcegc.g) 3408 unlock(&forcegc.lock) 3409 } 3410 // scavenge heap once in a while 3411 if lastscavenge+scavengelimit/2 < now { 3412 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3413 lastscavenge = now 3414 nscavenge++ 3415 } 3416 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now { 3417 lasttrace = now 3418 schedtrace(debug.scheddetail > 0) 3419 } 3420 } 3421 } 3422 3423 var pdesc [_MaxGomaxprocs]struct { 3424 schedtick uint32 3425 schedwhen int64 3426 syscalltick uint32 3427 syscallwhen int64 3428 } 3429 3430 // forcePreemptNS is the time slice given to a G before it is 3431 // preempted. 3432 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3433 3434 func retake(now int64) uint32 { 3435 n := 0 3436 for i := int32(0); i < gomaxprocs; i++ { 3437 _p_ := allp[i] 3438 if _p_ == nil { 3439 continue 3440 } 3441 pd := &pdesc[i] 3442 s := _p_.status 3443 if s == _Psyscall { 3444 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3445 t := int64(_p_.syscalltick) 3446 if int64(pd.syscalltick) != t { 3447 pd.syscalltick = uint32(t) 3448 pd.syscallwhen = now 3449 continue 3450 } 3451 // On the one hand we don't want to retake Ps if there is no other work to do, 3452 // but on the other hand we want to retake them eventually 3453 // because they can prevent the sysmon thread from deep sleep. 3454 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3455 continue 3456 } 3457 // Need to decrement number of idle locked M's 3458 // (pretending that one more is running) before the CAS. 3459 // Otherwise the M from which we retake can exit the syscall, 3460 // increment nmidle and report deadlock. 3461 incidlelocked(-1) 3462 if atomic.Cas(&_p_.status, s, _Pidle) { 3463 if trace.enabled { 3464 traceGoSysBlock(_p_) 3465 traceProcStop(_p_) 3466 } 3467 n++ 3468 _p_.syscalltick++ 3469 handoffp(_p_) 3470 } 3471 incidlelocked(1) 3472 } else if s == _Prunning { 3473 // Preempt G if it's running for too long. 3474 t := int64(_p_.schedtick) 3475 if int64(pd.schedtick) != t { 3476 pd.schedtick = uint32(t) 3477 pd.schedwhen = now 3478 continue 3479 } 3480 if pd.schedwhen+forcePreemptNS > now { 3481 continue 3482 } 3483 preemptone(_p_) 3484 } 3485 } 3486 return uint32(n) 3487 } 3488 3489 // Tell all goroutines that they have been preempted and they should stop. 3490 // This function is purely best-effort. It can fail to inform a goroutine if a 3491 // processor just started running it. 3492 // No locks need to be held. 3493 // Returns true if preemption request was issued to at least one goroutine. 3494 func preemptall() bool { 3495 res := false 3496 for i := int32(0); i < gomaxprocs; i++ { 3497 _p_ := allp[i] 3498 if _p_ == nil || _p_.status != _Prunning { 3499 continue 3500 } 3501 if preemptone(_p_) { 3502 res = true 3503 } 3504 } 3505 return res 3506 } 3507 3508 // Tell the goroutine running on processor P to stop. 3509 // This function is purely best-effort. It can incorrectly fail to inform the 3510 // goroutine. It can send inform the wrong goroutine. Even if it informs the 3511 // correct goroutine, that goroutine might ignore the request if it is 3512 // simultaneously executing newstack. 3513 // No lock needs to be held. 3514 // Returns true if preemption request was issued. 3515 // The actual preemption will happen at some point in the future 3516 // and will be indicated by the gp->status no longer being 3517 // Grunning 3518 func preemptone(_p_ *p) bool { 3519 mp := _p_.m.ptr() 3520 if mp == nil || mp == getg().m { 3521 return false 3522 } 3523 gp := mp.curg 3524 if gp == nil || gp == mp.g0 { 3525 return false 3526 } 3527 3528 gp.preempt = true 3529 3530 // Every call in a go routine checks for stack overflow by 3531 // comparing the current stack pointer to gp->stackguard0. 3532 // Setting gp->stackguard0 to StackPreempt folds 3533 // preemption into the normal stack overflow check. 3534 gp.stackguard0 = stackPreempt 3535 return true 3536 } 3537 3538 var starttime int64 3539 3540 func schedtrace(detailed bool) { 3541 now := nanotime() 3542 if starttime == 0 { 3543 starttime = now 3544 } 3545 3546 lock(&sched.lock) 3547 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 3548 if detailed { 3549 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 3550 } 3551 // We must be careful while reading data from P's, M's and G's. 3552 // Even if we hold schedlock, most data can be changed concurrently. 3553 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 3554 for i := int32(0); i < gomaxprocs; i++ { 3555 _p_ := allp[i] 3556 if _p_ == nil { 3557 continue 3558 } 3559 mp := _p_.m.ptr() 3560 h := atomic.Load(&_p_.runqhead) 3561 t := atomic.Load(&_p_.runqtail) 3562 if detailed { 3563 id := int32(-1) 3564 if mp != nil { 3565 id = mp.id 3566 } 3567 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 3568 } else { 3569 // In non-detailed mode format lengths of per-P run queues as: 3570 // [len1 len2 len3 len4] 3571 print(" ") 3572 if i == 0 { 3573 print("[") 3574 } 3575 print(t - h) 3576 if i == gomaxprocs-1 { 3577 print("]\n") 3578 } 3579 } 3580 } 3581 3582 if !detailed { 3583 unlock(&sched.lock) 3584 return 3585 } 3586 3587 for mp := allm; mp != nil; mp = mp.alllink { 3588 _p_ := mp.p.ptr() 3589 gp := mp.curg 3590 lockedg := mp.lockedg 3591 id1 := int32(-1) 3592 if _p_ != nil { 3593 id1 = _p_.id 3594 } 3595 id2 := int64(-1) 3596 if gp != nil { 3597 id2 = gp.goid 3598 } 3599 id3 := int64(-1) 3600 if lockedg != nil { 3601 id3 = lockedg.goid 3602 } 3603 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n") 3604 } 3605 3606 lock(&allglock) 3607 for gi := 0; gi < len(allgs); gi++ { 3608 gp := allgs[gi] 3609 mp := gp.m 3610 lockedm := gp.lockedm 3611 id1 := int32(-1) 3612 if mp != nil { 3613 id1 = mp.id 3614 } 3615 id2 := int32(-1) 3616 if lockedm != nil { 3617 id2 = lockedm.id 3618 } 3619 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 3620 } 3621 unlock(&allglock) 3622 unlock(&sched.lock) 3623 } 3624 3625 // Put mp on midle list. 3626 // Sched must be locked. 3627 // May run during STW, so write barriers are not allowed. 3628 //go:nowritebarrier 3629 func mput(mp *m) { 3630 mp.schedlink = sched.midle 3631 sched.midle.set(mp) 3632 sched.nmidle++ 3633 checkdead() 3634 } 3635 3636 // Try to get an m from midle list. 3637 // Sched must be locked. 3638 // May run during STW, so write barriers are not allowed. 3639 //go:nowritebarrier 3640 func mget() *m { 3641 mp := sched.midle.ptr() 3642 if mp != nil { 3643 sched.midle = mp.schedlink 3644 sched.nmidle-- 3645 } 3646 return mp 3647 } 3648 3649 // Put gp on the global runnable queue. 3650 // Sched must be locked. 3651 // May run during STW, so write barriers are not allowed. 3652 //go:nowritebarrier 3653 func globrunqput(gp *g) { 3654 gp.schedlink = 0 3655 if sched.runqtail != 0 { 3656 sched.runqtail.ptr().schedlink.set(gp) 3657 } else { 3658 sched.runqhead.set(gp) 3659 } 3660 sched.runqtail.set(gp) 3661 sched.runqsize++ 3662 } 3663 3664 // Put gp at the head of the global runnable queue. 3665 // Sched must be locked. 3666 // May run during STW, so write barriers are not allowed. 3667 //go:nowritebarrier 3668 func globrunqputhead(gp *g) { 3669 gp.schedlink = sched.runqhead 3670 sched.runqhead.set(gp) 3671 if sched.runqtail == 0 { 3672 sched.runqtail.set(gp) 3673 } 3674 sched.runqsize++ 3675 } 3676 3677 // Put a batch of runnable goroutines on the global runnable queue. 3678 // Sched must be locked. 3679 func globrunqputbatch(ghead *g, gtail *g, n int32) { 3680 gtail.schedlink = 0 3681 if sched.runqtail != 0 { 3682 sched.runqtail.ptr().schedlink.set(ghead) 3683 } else { 3684 sched.runqhead.set(ghead) 3685 } 3686 sched.runqtail.set(gtail) 3687 sched.runqsize += n 3688 } 3689 3690 // Try get a batch of G's from the global runnable queue. 3691 // Sched must be locked. 3692 func globrunqget(_p_ *p, max int32) *g { 3693 if sched.runqsize == 0 { 3694 return nil 3695 } 3696 3697 n := sched.runqsize/gomaxprocs + 1 3698 if n > sched.runqsize { 3699 n = sched.runqsize 3700 } 3701 if max > 0 && n > max { 3702 n = max 3703 } 3704 if n > int32(len(_p_.runq))/2 { 3705 n = int32(len(_p_.runq)) / 2 3706 } 3707 3708 sched.runqsize -= n 3709 if sched.runqsize == 0 { 3710 sched.runqtail = 0 3711 } 3712 3713 gp := sched.runqhead.ptr() 3714 sched.runqhead = gp.schedlink 3715 n-- 3716 for ; n > 0; n-- { 3717 gp1 := sched.runqhead.ptr() 3718 sched.runqhead = gp1.schedlink 3719 runqput(_p_, gp1, false) 3720 } 3721 return gp 3722 } 3723 3724 // Put p to on _Pidle list. 3725 // Sched must be locked. 3726 // May run during STW, so write barriers are not allowed. 3727 //go:nowritebarrier 3728 func pidleput(_p_ *p) { 3729 if !runqempty(_p_) { 3730 throw("pidleput: P has non-empty run queue") 3731 } 3732 _p_.link = sched.pidle 3733 sched.pidle.set(_p_) 3734 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 3735 } 3736 3737 // Try get a p from _Pidle list. 3738 // Sched must be locked. 3739 // May run during STW, so write barriers are not allowed. 3740 //go:nowritebarrier 3741 func pidleget() *p { 3742 _p_ := sched.pidle.ptr() 3743 if _p_ != nil { 3744 sched.pidle = _p_.link 3745 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 3746 } 3747 return _p_ 3748 } 3749 3750 // runqempty returns true if _p_ has no Gs on its local run queue. 3751 // Note that this test is generally racy. 3752 func runqempty(_p_ *p) bool { 3753 return _p_.runqhead == _p_.runqtail && _p_.runnext == 0 3754 } 3755 3756 // To shake out latent assumptions about scheduling order, 3757 // we introduce some randomness into scheduling decisions 3758 // when running with the race detector. 3759 // The need for this was made obvious by changing the 3760 // (deterministic) scheduling order in Go 1.5 and breaking 3761 // many poorly-written tests. 3762 // With the randomness here, as long as the tests pass 3763 // consistently with -race, they shouldn't have latent scheduling 3764 // assumptions. 3765 const randomizeScheduler = raceenabled 3766 3767 // runqput tries to put g on the local runnable queue. 3768 // If next if false, runqput adds g to the tail of the runnable queue. 3769 // If next is true, runqput puts g in the _p_.runnext slot. 3770 // If the run queue is full, runnext puts g on the global queue. 3771 // Executed only by the owner P. 3772 func runqput(_p_ *p, gp *g, next bool) { 3773 if randomizeScheduler && next && fastrand1()%2 == 0 { 3774 next = false 3775 } 3776 3777 if next { 3778 retryNext: 3779 oldnext := _p_.runnext 3780 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 3781 goto retryNext 3782 } 3783 if oldnext == 0 { 3784 return 3785 } 3786 // Kick the old runnext out to the regular run queue. 3787 gp = oldnext.ptr() 3788 } 3789 3790 retry: 3791 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 3792 t := _p_.runqtail 3793 if t-h < uint32(len(_p_.runq)) { 3794 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 3795 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 3796 return 3797 } 3798 if runqputslow(_p_, gp, h, t) { 3799 return 3800 } 3801 // the queue is not full, now the put above must suceed 3802 goto retry 3803 } 3804 3805 // Put g and a batch of work from local runnable queue on global queue. 3806 // Executed only by the owner P. 3807 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 3808 var batch [len(_p_.runq)/2 + 1]*g 3809 3810 // First, grab a batch from local queue. 3811 n := t - h 3812 n = n / 2 3813 if n != uint32(len(_p_.runq)/2) { 3814 throw("runqputslow: queue is not full") 3815 } 3816 for i := uint32(0); i < n; i++ { 3817 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 3818 } 3819 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 3820 return false 3821 } 3822 batch[n] = gp 3823 3824 if randomizeScheduler { 3825 for i := uint32(1); i <= n; i++ { 3826 j := fastrand1() % (i + 1) 3827 batch[i], batch[j] = batch[j], batch[i] 3828 } 3829 } 3830 3831 // Link the goroutines. 3832 for i := uint32(0); i < n; i++ { 3833 batch[i].schedlink.set(batch[i+1]) 3834 } 3835 3836 // Now put the batch on global queue. 3837 lock(&sched.lock) 3838 globrunqputbatch(batch[0], batch[n], int32(n+1)) 3839 unlock(&sched.lock) 3840 return true 3841 } 3842 3843 // Get g from local runnable queue. 3844 // If inheritTime is true, gp should inherit the remaining time in the 3845 // current time slice. Otherwise, it should start a new time slice. 3846 // Executed only by the owner P. 3847 func runqget(_p_ *p) (gp *g, inheritTime bool) { 3848 // If there's a runnext, it's the next G to run. 3849 for { 3850 next := _p_.runnext 3851 if next == 0 { 3852 break 3853 } 3854 if _p_.runnext.cas(next, 0) { 3855 return next.ptr(), true 3856 } 3857 } 3858 3859 for { 3860 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 3861 t := _p_.runqtail 3862 if t == h { 3863 return nil, false 3864 } 3865 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 3866 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 3867 return gp, false 3868 } 3869 } 3870 } 3871 3872 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 3873 // Batch is a ring buffer starting at batchHead. 3874 // Returns number of grabbed goroutines. 3875 // Can be executed by any P. 3876 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 3877 for { 3878 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 3879 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 3880 n := t - h 3881 n = n - n/2 3882 if n == 0 { 3883 if stealRunNextG { 3884 // Try to steal from _p_.runnext. 3885 if next := _p_.runnext; next != 0 { 3886 // Sleep to ensure that _p_ isn't about to run the g we 3887 // are about to steal. 3888 // The important use case here is when the g running on _p_ 3889 // ready()s another g and then almost immediately blocks. 3890 // Instead of stealing runnext in this window, back off 3891 // to give _p_ a chance to schedule runnext. This will avoid 3892 // thrashing gs between different Ps. 3893 usleep(100) 3894 if !_p_.runnext.cas(next, 0) { 3895 continue 3896 } 3897 batch[batchHead%uint32(len(batch))] = next 3898 return 1 3899 } 3900 } 3901 return 0 3902 } 3903 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 3904 continue 3905 } 3906 for i := uint32(0); i < n; i++ { 3907 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 3908 batch[(batchHead+i)%uint32(len(batch))] = g 3909 } 3910 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 3911 return n 3912 } 3913 } 3914 } 3915 3916 // Steal half of elements from local runnable queue of p2 3917 // and put onto local runnable queue of p. 3918 // Returns one of the stolen elements (or nil if failed). 3919 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 3920 t := _p_.runqtail 3921 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 3922 if n == 0 { 3923 return nil 3924 } 3925 n-- 3926 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 3927 if n == 0 { 3928 return gp 3929 } 3930 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 3931 if t-h+n >= uint32(len(_p_.runq)) { 3932 throw("runqsteal: runq overflow") 3933 } 3934 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 3935 return gp 3936 } 3937 3938 func testSchedLocalQueue() { 3939 _p_ := new(p) 3940 gs := make([]g, len(_p_.runq)) 3941 for i := 0; i < len(_p_.runq); i++ { 3942 if g, _ := runqget(_p_); g != nil { 3943 throw("runq is not empty initially") 3944 } 3945 for j := 0; j < i; j++ { 3946 runqput(_p_, &gs[i], false) 3947 } 3948 for j := 0; j < i; j++ { 3949 if g, _ := runqget(_p_); g != &gs[i] { 3950 print("bad element at iter ", i, "/", j, "\n") 3951 throw("bad element") 3952 } 3953 } 3954 if g, _ := runqget(_p_); g != nil { 3955 throw("runq is not empty afterwards") 3956 } 3957 } 3958 } 3959 3960 func testSchedLocalQueueSteal() { 3961 p1 := new(p) 3962 p2 := new(p) 3963 gs := make([]g, len(p1.runq)) 3964 for i := 0; i < len(p1.runq); i++ { 3965 for j := 0; j < i; j++ { 3966 gs[j].sig = 0 3967 runqput(p1, &gs[j], false) 3968 } 3969 gp := runqsteal(p2, p1, true) 3970 s := 0 3971 if gp != nil { 3972 s++ 3973 gp.sig++ 3974 } 3975 for { 3976 gp, _ = runqget(p2) 3977 if gp == nil { 3978 break 3979 } 3980 s++ 3981 gp.sig++ 3982 } 3983 for { 3984 gp, _ = runqget(p1) 3985 if gp == nil { 3986 break 3987 } 3988 gp.sig++ 3989 } 3990 for j := 0; j < i; j++ { 3991 if gs[j].sig != 1 { 3992 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n") 3993 throw("bad element") 3994 } 3995 } 3996 if s != i/2 && s != i/2+1 { 3997 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n") 3998 throw("bad steal") 3999 } 4000 } 4001 } 4002 4003 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4004 func setMaxThreads(in int) (out int) { 4005 lock(&sched.lock) 4006 out = int(sched.maxmcount) 4007 sched.maxmcount = int32(in) 4008 checkmcount() 4009 unlock(&sched.lock) 4010 return 4011 } 4012 4013 func haveexperiment(name string) bool { 4014 x := sys.Goexperiment 4015 for x != "" { 4016 xname := "" 4017 i := index(x, ",") 4018 if i < 0 { 4019 xname, x = x, "" 4020 } else { 4021 xname, x = x[:i], x[i+1:] 4022 } 4023 if xname == name { 4024 return true 4025 } 4026 } 4027 return false 4028 } 4029 4030 //go:nosplit 4031 func procPin() int { 4032 _g_ := getg() 4033 mp := _g_.m 4034 4035 mp.locks++ 4036 return int(mp.p.ptr().id) 4037 } 4038 4039 //go:nosplit 4040 func procUnpin() { 4041 _g_ := getg() 4042 _g_.m.locks-- 4043 } 4044 4045 //go:linkname sync_runtime_procPin sync.runtime_procPin 4046 //go:nosplit 4047 func sync_runtime_procPin() int { 4048 return procPin() 4049 } 4050 4051 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 4052 //go:nosplit 4053 func sync_runtime_procUnpin() { 4054 procUnpin() 4055 } 4056 4057 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 4058 //go:nosplit 4059 func sync_atomic_runtime_procPin() int { 4060 return procPin() 4061 } 4062 4063 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 4064 //go:nosplit 4065 func sync_atomic_runtime_procUnpin() { 4066 procUnpin() 4067 } 4068 4069 // Active spinning for sync.Mutex. 4070 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 4071 //go:nosplit 4072 func sync_runtime_canSpin(i int) bool { 4073 // sync.Mutex is cooperative, so we are conservative with spinning. 4074 // Spin only few times and only if running on a multicore machine and 4075 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 4076 // As opposed to runtime mutex we don't do passive spinning here, 4077 // because there can be work on global runq on on other Ps. 4078 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 4079 return false 4080 } 4081 if p := getg().m.p.ptr(); !runqempty(p) { 4082 return false 4083 } 4084 return true 4085 } 4086 4087 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 4088 //go:nosplit 4089 func sync_runtime_doSpin() { 4090 procyield(active_spin_cnt) 4091 }