github.com/filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and then we would need
//    to park it. Also, it would destroy locality of computation as we want to
//    preserve dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to the local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to the global run queue, as we are
// not sloppy about thread unparking when submitting to the global queue. Also see
// comments for nmspinning manipulation.
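// As a concrete (and deliberately simplified) model of the two racing patterns
// above, consider the following standalone sketch. It is illustrative only, not
// runtime code: workQueue, work, and wakeOne are stand-ins, and sync/atomic only
// marks the ordering-sensitive steps; the real #StoreLoad barrier comes from the
// runtime's internal atomics.
//
//	var nmspinning int32 // count of spinning worker threads
//
//	// Readying side: submit work, barrier, then check nmspinning.
//	func readyWork(q *workQueue, w work) {
//		q.push(w)
//		if atomic.LoadInt32(&nmspinning) == 0 {
//			wakeOne() // no spinner will discover w on its own
//		}
//	}
//
//	// Spinning side: decrement nmspinning, barrier, then re-check all queues.
//	func stopSpinning(qs []*workQueue) {
//		atomic.AddInt32(&nmspinning, -1)
//		for _, q := range qs {
//			if !q.empty() {
//				wakeOne() // lost the race with a submitter; compensate
//				return
//			}
//		}
//	}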
var (
	m0           m
	g0           g
	raceprocctx0 uintptr
)

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started. Must be after runtime_init
	// because nanotime on some platforms depends on startNano.
	runtimeInitTime = nanotime()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//go:nosplit
func Gosched() {
	mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
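// For intuition, a runtime-internal blocking helper built on goparkunlock and
// goready would have roughly this shape (a hypothetical sketch in the style of
// forcegchelper above; cond and the surrounding wake-up protocol are stand-ins,
// and a real waker would need to have recorded the parked G to pass to goready):
//
//	func waitFor(l *mutex, cond func() bool) {
//		for {
//			lock(l)
//			if cond() {
//				unlock(l)
//				return
//			}
//			// Atomically release l and park this G; some other G that
//			// makes cond true must call goready(gp, 0) to resume it.
//			goparkunlock(l, "demo wait", traceEvGoBlock, 1)
//		}
//	}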
//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}
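// The same double-indirection trick can be demonstrated outside the runtime
// (illustrative only; it relies on the unexported func-value layout, where the
// interface's data word points at a funcval whose first word is the entry PC):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"unsafe"
//	)
//
//	func hello() {}
//
//	func main() {
//		f := interface{}(hello)
//		words := (*[2]unsafe.Pointer)(unsafe.Pointer(&f)) // [type word, data word]
//		pc := **(**uintptr)(unsafe.Pointer(&words[1]))
//		fmt.Println(runtime.FuncForPC(pc).Name()) // main.hello
//	}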
// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	sp := stringStructOf(&badmorestackg0Msg)
	write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	sp := stringStructOf(&badmorestackgsignalMsg)
	write(2, sp.str, int32(sp.len))
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)
	alginit()       // maps must not be used before this call
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules

	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procs > _MaxGomaxprocs {
		procs = _MaxGomaxprocs
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}
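// The GOMAXPROCS resolution in schedinit (default to ncpu, allow an environment
// override, clamp to the hard cap) can be mirrored at user level like this
// (illustrative only; 256 stands in for the internal _MaxGomaxprocs cap):
//
//	procs := runtime.NumCPU()
//	if n, err := strconv.Atoi(os.Getenv("GOMAXPROCS")); err == nil && n > 0 {
//		procs = n
//	}
//	if procs > 256 {
//		procs = 256
//	}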
func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	lock(&sched.lock)
	mp.id = sched.mcount
	sched.mcount++
	checkmcount()
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	if n > sched.nmidle+1 { // one M is currently running
		n = sched.nmidle + 1
	}
	unlock(&sched.lock)
	return n
}

func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	n -= sched.nmidle + 1 // one M is currently running
	unlock(&sched.lock)
	return n > 0
}

func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing uint32

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	atomic.Store(&freezing, 1)
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gcscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gcscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gcscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
		throw("casgstatus")
	}

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state, giving
	// GC time to finish and change the state to oldval.
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
		// But meanwhile just yield.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}
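// The _Gscan bit behaves like a try-lock on the status word: castogscanstatus
// is the acquire, casfrom_Gscanstatus the release, and casgstatus spins while
// the bit is held. A stripped-down standalone model (illustrative only; the
// real statuses carry more states, and the failure paths throw):
//
//	import "sync/atomic"
//
//	const scanBit = 0x1000
//
//	// tryLockStatus sets the scan bit if the status still equals old.
//	func tryLockStatus(status *uint32, old uint32) bool {
//		return atomic.CompareAndSwapUint32(status, old, old|scanBit)
//	}
//
//	// unlockStatus clears the scan bit, releasing the "lock".
//	func unlockStatus(status *uint32, locked uint32) bool {
//		return atomic.CompareAndSwapUint32(status, locked, locked&^scanBit)
//	}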
// casgcopystack(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g, gcw *gcWork) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.

	gp.gcscandone = false

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 10 * 1000
	var nextYield int64

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
loop:
	for i := 0; !gp.gcscandone; i++ {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true
			break loop

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					scanstack(gp, gcw)
					gp.gcscandone = true
				}
				restartg(gp)
				break loop
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}

		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			procyield(10)
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}
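// casgstatus and scang share the same two-level backoff: spin briefly with
// cheap re-checks, then fall back to yielding the OS thread. A portable sketch
// of that shape (illustrative only; runtime.Gosched and time.Now stand in for
// osyield and nanotime, and there is no user-level procyield):
//
//	import (
//		"runtime"
//		"time"
//	)
//
//	func waitUntil(cond func() bool) {
//		const yieldDelay = 10 * 1000 // ns
//		var nextYield int64
//		for i := 0; !cond(); i++ {
//			if i == 0 {
//				nextYield = time.Now().UnixNano() + yieldDelay
//			}
//			if time.Now().UnixNano() < nextYield {
//				for x := 0; x < 10 && !cond(); x++ {
//					// spin: cheap back-to-back re-checks
//				}
//			} else {
//				runtime.Gosched()
//				nextYield = time.Now().UnixNano() + yieldDelay/2
//			}
//		}
//	}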
// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points, and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(func() { startTheWorldWithSema(false) })
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	// sanity checks
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for i := 0; i < int(gomaxprocs); i++ {
			p := allp[i]
			if p.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
		}
	}
	if atomic.Load(&freezing) != 0 {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}
}
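// The stopwait/stopnote handshake is a counted quiescence barrier: the stopper
// publishes a count, every P that stops decrements it, and whoever hits zero
// wakes the stopper. A user-level analog with a buffered channel playing the
// role of the note (illustrative only; the runtime also holds sched.lock and
// re-preempts on a timeout, which this sketch omits):
//
//	import "sync/atomic"
//
//	type quiesce struct {
//		stopwait int32
//		stopnote chan struct{} // make(chan struct{}, 1)
//	}
//
//	func (q *quiesce) stopOne() {
//		if atomic.AddInt32(&q.stopwait, -1) == 0 {
//			q.stopnote <- struct{}{} // last one out wakes the stopper
//		}
//	}
//
//	func (q *quiesce) waitAll(n int32) {
//		atomic.StoreInt32(&q.stopwait, n) // before asking anyone to stop
//		// ...request that all n workers stop...
//		<-q.stopnote
//	}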
func mhelpgc() {
	_g_ := getg()
	_g_.m.helpgc = -1
}

func startTheWorldWithSema(emitTraceEvent bool) int64 {
	_g_ := getg()

	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	injectglist(gp)
	add := needaddgcproc()
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	startTime := nanotime()
	if emitTraceEvent {
		traceGCSTWDone()
	}

	// Wake up an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return startTime
}

// Called to start an M.
//go:nosplit
func mstart() {
	_g_ := getg()

	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		// Create an extra M for callbacks on threads not created by Go.
		if iscgo && !cgoHasExtraM {
			cgoHasExtraM = true
			newextram()
		}
		initsig(false)
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		_g_.m.helpgc = 0
		stopm()
	} else if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run the safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows _p_.
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}
	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Cannot throw, because the scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	extraMCount--
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	atomic.Xadd(&sched.ngsys, -1)
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	gp.gcscanvalid = true
	gp.gcscandone = true
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.locked = _LockInternal
	mp.lockedg = gp
	gp.lockedm = mp
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	atomic.Xadd(&sched.ngsys, +1)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	extraMCount++
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	atomic.Xadd(&sched.ngsys, +1)

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock()
	unminit()

	mnext := lockextra(true)
	extraMCount++
	mp.schedlink.set(mnext)

	setg(nil)

	// Commit the release of mp.
	unlockextra(mp)

	msigrestore(sigmask)
}

// A helper function for EnsureDropM.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var extram uintptr
var extraMCount uint32 // Protected by lockextra
var extraMWaiters uint32

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := atomic.Loaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				atomic.Xadd(&extraMWaiters, 1)
				incr = true
			}
			usleep(1)
			continue
		}
		if atomic.Casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
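// The extram head pointer doubles as a spin lock: swapping in the sentinel 1
// locks the whole list, and storing a real head unlocks it, which sidesteps the
// ABA problem of a lock-free pop. A standalone model of the same idea
// (illustrative only; it stores pointers as uintptrs, which the runtime can get
// away with for permanently reachable m's but ordinary Go code must not do):
//
//	import (
//		"runtime"
//		"sync/atomic"
//		"unsafe"
//	)
//
//	type node struct{ next *node }
//
//	var head uintptr // 0 = empty list, 1 = locked, else *node
//
//	func lockList() *node {
//		for {
//			old := atomic.LoadUintptr(&head)
//			if old == 1 {
//				runtime.Gosched() // someone else holds the list
//				continue
//			}
//			if atomic.CompareAndSwapUintptr(&head, old, 1) {
//				return (*node)(unsafe.Pointer(old)) // may be nil
//			}
//		}
//	}
//
//	func unlockList(newHead *node) {
//		atomic.StoreUintptr(&head, uintptr(unsafe.Pointer(newHead)))
//	}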
// execLock serializes exec and clone to avoid bugs or unspecified behaviour
// around exec'ing while creating/destroying threads. See issue #19546.
var execLock rwmutex

// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrierrec
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)
	mp.nextp.set(_p_)
	mp.sigmask = initSigmask
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(funcPC(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		execLock.rlock() // Prevent process clone.
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		execLock.runlock()
		return
	}
	execLock.rlock() // Prevent process clone.
	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
	execLock.runlock()
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	if _g_.m.p != 0 {
		throw("stopm holding p")
	}
	if _g_.m.spinning {
		throw("stopm spinning")
	}

retry:
	lock(&sched.lock)
	mput(_g_.m)
	unlock(&sched.lock)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {
		gchelper()
		_g_.m.helpgc = 0
		_g_.m.mcache = nil
		_g_.m.p = 0
		goto retry
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's it does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and startm will
// either decrement nmspinning or set m.spinning in the newly started M.
//go:nowritebarrierrec
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			if spinning {
				// The caller incremented nmspinning, but there are no idle Ps,
				// so it's okay to just undo the increment and give up.
				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
					throw("startm: negative nmspinning")
				}
			}
			return
		}
	}
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, _p_)
		return
	}
	if mp.spinning {
		throw("startm: m is spinning")
	}
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	mp.spinning = spinning
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//go:nowritebarrierrec
func handoffp(_p_ *p) {
	// handoffp must start an M in any situation where
	// findrunnable would return a G to run on _p_.

	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {
		startm(_p_, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
		startm(_p_, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
		startm(_p_, true)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting != 0 {
		_p_.status = _Pgcstop
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	pidleput(_p_)
	unlock(&sched.lock)
}

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
func wakep() {
	// be conservative about spinning threads
	if !atomic.Cas(&sched.nmspinning, 0, 1) {
		return
	}
	startm(nil, true)
}
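// The CAS from 0 to 1 in wakep is what enforces "at most one new spinner at a
// time": concurrent wakep calls collapse into a single startm. A tiny
// standalone model (illustrative only):
//
//	import "sync/atomic"
//
//	var nmspinning int32
//
//	func wake(startSpinner func()) {
//		// Be conservative: only one waker wins; the losers assume the
//		// winner's spinner will find their work too.
//		if !atomic.CompareAndSwapInt32(&nmspinning, 0, 1) {
//			return
//		}
//		startSpinner() // must decrement nmspinning when it stops spinning
//	}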
1846 //go:nowritebarrierrec
1847 func startlockedm(gp *g) {
1848 	_g_ := getg()
1849 
1850 	mp := gp.lockedm
1851 	if mp == _g_.m {
1852 		throw("startlockedm: locked to me")
1853 	}
1854 	if mp.nextp != 0 {
1855 		throw("startlockedm: m has p")
1856 	}
1857 	// directly handoff current P to the locked m
1858 	incidlelocked(-1)
1859 	_p_ := releasep()
1860 	mp.nextp.set(_p_)
1861 	notewakeup(&mp.park)
1862 	stopm()
1863 }
1864 
1865 // Stops the current m for stopTheWorld.
1866 // Returns when the world is restarted.
1867 func gcstopm() {
1868 	_g_ := getg()
1869 
1870 	if sched.gcwaiting == 0 {
1871 		throw("gcstopm: not waiting for gc")
1872 	}
1873 	if _g_.m.spinning {
1874 		_g_.m.spinning = false
1875 		// OK to just drop nmspinning here,
1876 		// startTheWorld will unpark threads as necessary.
1877 		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1878 			throw("gcstopm: negative nmspinning")
1879 		}
1880 	}
1881 	_p_ := releasep()
1882 	lock(&sched.lock)
1883 	_p_.status = _Pgcstop
1884 	sched.stopwait--
1885 	if sched.stopwait == 0 {
1886 		notewakeup(&sched.stopnote)
1887 	}
1888 	unlock(&sched.lock)
1889 	stopm()
1890 }
1891 
1892 // Schedules gp to run on the current M.
1893 // If inheritTime is true, gp inherits the remaining time in the
1894 // current time slice. Otherwise, it starts a new time slice.
1895 // Never returns.
1896 //
1897 // Write barriers are allowed because this is called immediately after
1898 // acquiring a P in several places.
1899 //
1900 //go:yeswritebarrierrec
1901 func execute(gp *g, inheritTime bool) {
1902 	_g_ := getg()
1903 
1904 	casgstatus(gp, _Grunnable, _Grunning)
1905 	gp.waitsince = 0
1906 	gp.preempt = false
1907 	gp.stackguard0 = gp.stack.lo + _StackGuard
1908 	if !inheritTime {
1909 		_g_.m.p.ptr().schedtick++
1910 	}
1911 	_g_.m.curg = gp
1912 	gp.m = _g_.m
1913 
1914 	// Check whether the profiler needs to be turned on or off.
1915 	hz := sched.profilehz
1916 	if _g_.m.profilehz != hz {
1917 		setThreadCPUProfiler(hz)
1918 	}
1919 
1920 	if trace.enabled {
1921 		// GoSysExit has to happen when we have a P, but before GoStart.
1922 		// So we emit it here.
1923 		if gp.syscallsp != 0 && gp.sysblocktraced {
1924 			traceGoSysExit(gp.sysexitticks)
1925 		}
1926 		traceGoStart()
1927 	}
1928 
1929 	gogo(&gp.sched)
1930 }
1931 
1932 // Finds a runnable goroutine to execute.
1933 // Tries to steal from other P's, get g from global queue, poll network.
1934 func findrunnable() (gp *g, inheritTime bool) {
1935 	_g_ := getg()
1936 
1937 	// The conditions here and in handoffp must agree: if
1938 	// findrunnable would return a G to run, handoffp must start
1939 	// an M.
1940 
1941 top:
1942 	_p_ := _g_.m.p.ptr()
1943 	if sched.gcwaiting != 0 {
1944 		gcstopm()
1945 		goto top
1946 	}
1947 	if _p_.runSafePointFn != 0 {
1948 		runSafePointFn()
1949 	}
1950 	if fingwait && fingwake {
1951 		if gp := wakefing(); gp != nil {
1952 			ready(gp, 0, true)
1953 		}
1954 	}
1955 	if *cgo_yield != nil {
1956 		asmcgocall(*cgo_yield, nil)
1957 	}
1958 
1959 	// local runq
1960 	if gp, inheritTime := runqget(_p_); gp != nil {
1961 		return gp, inheritTime
1962 	}
1963 
1964 	// global runq
1965 	if sched.runqsize != 0 {
1966 		lock(&sched.lock)
1967 		gp := globrunqget(_p_, 0)
1968 		unlock(&sched.lock)
1969 		if gp != nil {
1970 			return gp, false
1971 		}
1972 	}
1973 
1974 	// Poll network.
1975 	// This netpoll is only an optimization before we resort to stealing.
1976 	// We can safely skip it if there is a thread blocked in netpoll already.
1977 	// If there is any kind of logical race with that blocked thread
1978 	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
1979 	// this thread will do blocking netpoll below anyway.
1980 	if netpollinited() && sched.lastpoll != 0 {
1981 		if gp := netpoll(false); gp != nil { // non-blocking
1982 			// netpoll returns list of goroutines linked by schedlink.
1983 			injectglist(gp.schedlink.ptr())
1984 			casgstatus(gp, _Gwaiting, _Grunnable)
1985 			if trace.enabled {
1986 				traceGoUnpark(gp, 0)
1987 			}
1988 			return gp, false
1989 		}
1990 	}
1991 
1992 	// Steal work from other P's.
1993 	procs := uint32(gomaxprocs)
1994 	if atomic.Load(&sched.npidle) == procs-1 {
1995 		// Either GOMAXPROCS=1 or everybody, except for us, is idle already.
1996 		// New work can appear from returning syscall/cgocall, network or timers.
1997 		// None of those submit to local run queues, so there is no point in stealing.
1998 		goto stop
1999 	}
2000 	// If number of spinning M's >= number of busy P's, block.
2001 	// This is necessary to prevent excessive CPU consumption
2002 	// when GOMAXPROCS>>1 but the program parallelism is low.
2003 	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2004 		goto stop
2005 	}
2006 	if !_g_.m.spinning {
2007 		_g_.m.spinning = true
2008 		atomic.Xadd(&sched.nmspinning, 1)
2009 	}
2010 	for i := 0; i < 4; i++ {
2011 		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2012 			if sched.gcwaiting != 0 {
2013 				goto top
2014 			}
2015 			stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2016 			if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2017 				return gp, false
2018 			}
2019 		}
2020 	}
2021 
2022 stop:
2023 
2024 	// We have nothing to do. If we're in the GC mark phase, can
2025 	// safely scan and blacken objects, and have work to do, run
2026 	// idle-time marking rather than give up the P.
2027 	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2028 		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2029 		gp := _p_.gcBgMarkWorker.ptr()
2030 		casgstatus(gp, _Gwaiting, _Grunnable)
2031 		if trace.enabled {
2032 			traceGoUnpark(gp, 0)
2033 		}
2034 		return gp, false
2035 	}
2036 
2037 	// return P and block
2038 	lock(&sched.lock)
2039 	if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2040 		unlock(&sched.lock)
2041 		goto top
2042 	}
2043 	if sched.runqsize != 0 {
2044 		gp := globrunqget(_p_, 0)
2045 		unlock(&sched.lock)
2046 		return gp, false
2047 	}
2048 	if releasep() != _p_ {
2049 		throw("findrunnable: wrong p")
2050 	}
2051 	pidleput(_p_)
2052 	unlock(&sched.lock)
2053 
2054 	// Delicate dance: thread transitions from spinning to non-spinning state,
2055 	// potentially concurrently with submission of new goroutines. We must
2056 	// drop nmspinning first and then check all per-P queues again (with
2057 	// #StoreLoad memory barrier in between). If we do it the other way around,
2058 	// another thread can submit a goroutine after we've checked all run queues
2059 	// but before we drop nmspinning; as a result nobody will unpark a thread
2060 	// to run the goroutine.
2061 	// If we discover new work below, we need to restore m.spinning as a signal
2062 	// for resetspinning to unpark a new worker thread (because there can be more
2063 	// than one starving goroutine). However, if after discovering new work
2064 	// we also observe no idle Ps, it is OK to just park the current thread:
2065 	// the system is fully loaded so no spinning threads are required.
2066 	// Also see "Worker thread parking/unparking" comment at the top of the file.
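// Aside (not part of this file): the ordering constraint described above can
// be modeled at user level with two atomics. This is a hedged sketch, not
// the runtime's actual code; sync/atomic operations stand in for the
// #StoreLoad-style barrier, and every name below is hypothetical:
//
//	package main
//
//	import "sync/atomic"
//
//	var work int32     // stands in for the per-P run queues
//	var spinners int32 // stands in for sched.nmspinning
//
//	// submit models readying a goroutine: publish the work first,
//	// then check for spinners (store, barrier, load).
//	func submit() {
//		atomic.StoreInt32(&work, 1)
//		if atomic.LoadInt32(&spinners) == 0 {
//			// no spinner saw our work: unpark a worker here
//		}
//	}
//
//	// park models the spinning->non-spinning transition: drop the
//	// spinner count first, then re-check for work that raced with us.
//	func park() {
//		atomic.AddInt32(&spinners, -1)
//		if atomic.LoadInt32(&work) != 0 {
//			atomic.AddInt32(&spinners, 1) // found work after all; spin again
//		}
//	}
//
//	func main() {
//		atomic.StoreInt32(&spinners, 1) // this thread is the spinner
//		submit()
//		park()
//	}
//
// The code below follows exactly that order: drop nmspinning, then recheck.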
2067 wasSpinning := _g_.m.spinning 2068 if _g_.m.spinning { 2069 _g_.m.spinning = false 2070 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2071 throw("findrunnable: negative nmspinning") 2072 } 2073 } 2074 2075 // check all runqueues once again 2076 for i := 0; i < int(gomaxprocs); i++ { 2077 _p_ := allp[i] 2078 if _p_ != nil && !runqempty(_p_) { 2079 lock(&sched.lock) 2080 _p_ = pidleget() 2081 unlock(&sched.lock) 2082 if _p_ != nil { 2083 acquirep(_p_) 2084 if wasSpinning { 2085 _g_.m.spinning = true 2086 atomic.Xadd(&sched.nmspinning, 1) 2087 } 2088 goto top 2089 } 2090 break 2091 } 2092 } 2093 2094 // Check for idle-priority GC work again. 2095 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2096 lock(&sched.lock) 2097 _p_ = pidleget() 2098 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2099 pidleput(_p_) 2100 _p_ = nil 2101 } 2102 unlock(&sched.lock) 2103 if _p_ != nil { 2104 acquirep(_p_) 2105 if wasSpinning { 2106 _g_.m.spinning = true 2107 atomic.Xadd(&sched.nmspinning, 1) 2108 } 2109 // Go back to idle GC check. 2110 goto stop 2111 } 2112 } 2113 2114 // poll network 2115 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2116 if _g_.m.p != 0 { 2117 throw("findrunnable: netpoll with p") 2118 } 2119 if _g_.m.spinning { 2120 throw("findrunnable: netpoll with spinning") 2121 } 2122 gp := netpoll(true) // block until new work is available 2123 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2124 if gp != nil { 2125 lock(&sched.lock) 2126 _p_ = pidleget() 2127 unlock(&sched.lock) 2128 if _p_ != nil { 2129 acquirep(_p_) 2130 injectglist(gp.schedlink.ptr()) 2131 casgstatus(gp, _Gwaiting, _Grunnable) 2132 if trace.enabled { 2133 traceGoUnpark(gp, 0) 2134 } 2135 return gp, false 2136 } 2137 injectglist(gp) 2138 } 2139 } 2140 stopm() 2141 goto top 2142 } 2143 2144 // pollWork returns true if there is non-background work this P could 2145 // be doing. This is a fairly lightweight check to be used for 2146 // background work loops, like idle GC. It checks a subset of the 2147 // conditions checked by the actual scheduler. 2148 func pollWork() bool { 2149 if sched.runqsize != 0 { 2150 return true 2151 } 2152 p := getg().m.p.ptr() 2153 if !runqempty(p) { 2154 return true 2155 } 2156 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2157 if gp := netpoll(false); gp != nil { 2158 injectglist(gp) 2159 return true 2160 } 2161 } 2162 return false 2163 } 2164 2165 func resetspinning() { 2166 _g_ := getg() 2167 if !_g_.m.spinning { 2168 throw("resetspinning: not a spinning m") 2169 } 2170 _g_.m.spinning = false 2171 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2172 if int32(nmspinning) < 0 { 2173 throw("findrunnable: negative nmspinning") 2174 } 2175 // M wakeup policy is deliberately somewhat conservative, so check if we 2176 // need to wakeup another P here. See "Worker thread parking/unparking" 2177 // comment at the top of the file for details. 2178 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2179 wakep() 2180 } 2181 } 2182 2183 // Injects the list of runnable G's into the scheduler. 2184 // Can run concurrently with GC. 
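// Aside (not part of this file): injectglist below places readied goroutines
// on the global run queue, and schedule (further below) polls that queue on
// every 61st schedtick so they cannot be starved by a pair of goroutines
// that keep readying each other in a local run queue. A self-contained toy
// simulation of that fairness rule (hypothetical, not runtime code):
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		local := []string{"g1", "g2"} // g1 and g2 respawn each other
//		global := []string{"g3"}      // g3 waits in the global queue
//		for tick := 1; tick <= 122; tick++ {
//			var gp string
//			if tick%61 == 0 && len(global) > 0 {
//				gp, global = global[0], global[1:] // fairness poll
//			} else {
//				gp, local = local[0], local[1:]
//				local = append(local, gp) // it readies its partner
//			}
//			if gp == "g3" {
//				fmt.Println("g3 finally ran at tick", tick)
//			}
//		}
//	}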
2185 func injectglist(glist *g) { 2186 if glist == nil { 2187 return 2188 } 2189 if trace.enabled { 2190 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2191 traceGoUnpark(gp, 0) 2192 } 2193 } 2194 lock(&sched.lock) 2195 var n int 2196 for n = 0; glist != nil; n++ { 2197 gp := glist 2198 glist = gp.schedlink.ptr() 2199 casgstatus(gp, _Gwaiting, _Grunnable) 2200 globrunqput(gp) 2201 } 2202 unlock(&sched.lock) 2203 for ; n != 0 && sched.npidle != 0; n-- { 2204 startm(nil, false) 2205 } 2206 } 2207 2208 // One round of scheduler: find a runnable goroutine and execute it. 2209 // Never returns. 2210 func schedule() { 2211 _g_ := getg() 2212 2213 if _g_.m.locks != 0 { 2214 throw("schedule: holding locks") 2215 } 2216 2217 if _g_.m.lockedg != nil { 2218 stoplockedm() 2219 execute(_g_.m.lockedg, false) // Never returns. 2220 } 2221 2222 top: 2223 if sched.gcwaiting != 0 { 2224 gcstopm() 2225 goto top 2226 } 2227 if _g_.m.p.ptr().runSafePointFn != 0 { 2228 runSafePointFn() 2229 } 2230 2231 var gp *g 2232 var inheritTime bool 2233 if trace.enabled || trace.shutdown { 2234 gp = traceReader() 2235 if gp != nil { 2236 casgstatus(gp, _Gwaiting, _Grunnable) 2237 traceGoUnpark(gp, 0) 2238 } 2239 } 2240 if gp == nil && gcBlackenEnabled != 0 { 2241 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2242 } 2243 if gp == nil { 2244 // Check the global runnable queue once in a while to ensure fairness. 2245 // Otherwise two goroutines can completely occupy the local runqueue 2246 // by constantly respawning each other. 2247 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2248 lock(&sched.lock) 2249 gp = globrunqget(_g_.m.p.ptr(), 1) 2250 unlock(&sched.lock) 2251 } 2252 } 2253 if gp == nil { 2254 gp, inheritTime = runqget(_g_.m.p.ptr()) 2255 if gp != nil && _g_.m.spinning { 2256 throw("schedule: spinning with local work") 2257 } 2258 } 2259 if gp == nil { 2260 gp, inheritTime = findrunnable() // blocks until work is available 2261 } 2262 2263 // This thread is going to run a goroutine and is not spinning anymore, 2264 // so if it was marked as spinning we need to reset it now and potentially 2265 // start a new spinning M. 2266 if _g_.m.spinning { 2267 resetspinning() 2268 } 2269 2270 if gp.lockedm != nil { 2271 // Hands off own p to the locked m, 2272 // then blocks waiting for a new p. 2273 startlockedm(gp) 2274 goto top 2275 } 2276 2277 execute(gp, inheritTime) 2278 } 2279 2280 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2281 // Typically a caller sets gp's status away from Grunning and then 2282 // immediately calls dropg to finish the job. The caller is also responsible 2283 // for arranging that gp will be restarted using ready at an 2284 // appropriate time. After calling dropg and arranging for gp to be 2285 // readied later, the caller can do other work but eventually should 2286 // call schedule to restart the scheduling of goroutines on this m. 2287 func dropg() { 2288 _g_ := getg() 2289 2290 setMNoWB(&_g_.m.curg.m, nil) 2291 setGNoWB(&_g_.m.curg, nil) 2292 } 2293 2294 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2295 unlock((*mutex)(lock)) 2296 return true 2297 } 2298 2299 // park continuation on g0. 
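// Aside (not part of this file): park_m below is what a goroutine runs
// through when it blocks on, say, a channel: chanrecv calls gopark, gopark
// mcalls into park_m on g0, and a later channel send readies the goroutine
// again. A minimal, hypothetical program that exercises that round trip:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		ch := make(chan int)
//		go func() { ch <- 42 }() // the send readies the parked receiver
//		fmt.Println(<-ch)        // the receive parks until the value arrives
//	}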
2300 func park_m(gp *g) { 2301 _g_ := getg() 2302 2303 if trace.enabled { 2304 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2305 } 2306 2307 casgstatus(gp, _Grunning, _Gwaiting) 2308 dropg() 2309 2310 if _g_.m.waitunlockf != nil { 2311 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2312 ok := fn(gp, _g_.m.waitlock) 2313 _g_.m.waitunlockf = nil 2314 _g_.m.waitlock = nil 2315 if !ok { 2316 if trace.enabled { 2317 traceGoUnpark(gp, 2) 2318 } 2319 casgstatus(gp, _Gwaiting, _Grunnable) 2320 execute(gp, true) // Schedule it back, never returns. 2321 } 2322 } 2323 schedule() 2324 } 2325 2326 func goschedImpl(gp *g) { 2327 status := readgstatus(gp) 2328 if status&^_Gscan != _Grunning { 2329 dumpgstatus(gp) 2330 throw("bad g status") 2331 } 2332 casgstatus(gp, _Grunning, _Grunnable) 2333 dropg() 2334 lock(&sched.lock) 2335 globrunqput(gp) 2336 unlock(&sched.lock) 2337 2338 schedule() 2339 } 2340 2341 // Gosched continuation on g0. 2342 func gosched_m(gp *g) { 2343 if trace.enabled { 2344 traceGoSched() 2345 } 2346 goschedImpl(gp) 2347 } 2348 2349 // goschedguarded is a forbidden-states-avoided version of gosched_m 2350 func goschedguarded_m(gp *g) { 2351 2352 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning { 2353 gogo(&gp.sched) // never return 2354 } 2355 2356 if trace.enabled { 2357 traceGoSched() 2358 } 2359 goschedImpl(gp) 2360 } 2361 2362 func gopreempt_m(gp *g) { 2363 if trace.enabled { 2364 traceGoPreempt() 2365 } 2366 goschedImpl(gp) 2367 } 2368 2369 // Finishes execution of the current goroutine. 2370 func goexit1() { 2371 if raceenabled { 2372 racegoend() 2373 } 2374 if trace.enabled { 2375 traceGoEnd() 2376 } 2377 mcall(goexit0) 2378 } 2379 2380 // goexit continuation on g0. 2381 func goexit0(gp *g) { 2382 _g_ := getg() 2383 2384 casgstatus(gp, _Grunning, _Gdead) 2385 if isSystemGoroutine(gp) { 2386 atomic.Xadd(&sched.ngsys, -1) 2387 } 2388 gp.m = nil 2389 gp.lockedm = nil 2390 _g_.m.lockedg = nil 2391 gp.paniconfault = false 2392 gp._defer = nil // should be true already but just in case. 2393 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2394 gp.writebuf = nil 2395 gp.waitreason = "" 2396 gp.param = nil 2397 gp.labels = nil 2398 gp.timer = nil 2399 2400 // Note that gp's stack scan is now "valid" because it has no 2401 // stack. 2402 gp.gcscanvalid = true 2403 dropg() 2404 2405 if _g_.m.locked&^_LockExternal != 0 { 2406 print("invalid m->locked = ", _g_.m.locked, "\n") 2407 throw("internal lockOSThread error") 2408 } 2409 _g_.m.locked = 0 2410 gfput(_g_.m.p.ptr(), gp) 2411 schedule() 2412 } 2413 2414 // save updates getg().sched to refer to pc and sp so that a following 2415 // gogo will restore pc and sp. 2416 // 2417 // save must not have write barriers because invoking a write barrier 2418 // can clobber getg().sched. 2419 // 2420 //go:nosplit 2421 //go:nowritebarrierrec 2422 func save(pc, sp uintptr) { 2423 _g_ := getg() 2424 2425 _g_.sched.pc = pc 2426 _g_.sched.sp = sp 2427 _g_.sched.lr = 0 2428 _g_.sched.ret = 0 2429 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2430 // We need to ensure ctxt is zero, but can't have a write 2431 // barrier here. However, it should always already be zero. 2432 // Assert that. 2433 if _g_.sched.ctxt != nil { 2434 badctxt() 2435 } 2436 } 2437 2438 // The goroutine g is about to enter a system call. 2439 // Record that it's not using the cpu anymore. 
2440 // This is called only from the go syscall library and cgocall, 2441 // not from the low-level system calls used by the runtime. 2442 // 2443 // Entersyscall cannot split the stack: the gosave must 2444 // make g->sched refer to the caller's stack segment, because 2445 // entersyscall is going to return immediately after. 2446 // 2447 // Nothing entersyscall calls can split the stack either. 2448 // We cannot safely move the stack during an active call to syscall, 2449 // because we do not know which of the uintptr arguments are 2450 // really pointers (back into the stack). 2451 // In practice, this means that we make the fast path run through 2452 // entersyscall doing no-split things, and the slow path has to use systemstack 2453 // to run bigger things on the system stack. 2454 // 2455 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2456 // saved SP and PC are restored. This is needed when exitsyscall will be called 2457 // from a function further up in the call stack than the parent, as g->syscallsp 2458 // must always point to a valid stack frame. entersyscall below is the normal 2459 // entry point for syscalls, which obtains the SP and PC from the caller. 2460 // 2461 // Syscall tracing: 2462 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2463 // If the syscall does not block, that is it, we do not emit any other events. 2464 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2465 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2466 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2467 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2468 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2469 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2470 // and we wait for the increment before emitting traceGoSysExit. 2471 // Note that the increment is done even if tracing is not enabled, 2472 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2473 // 2474 //go:nosplit 2475 func reentersyscall(pc, sp uintptr) { 2476 _g_ := getg() 2477 2478 // Disable preemption because during this function g is in Gsyscall status, 2479 // but can have inconsistent g->sched, do not let GC observe it. 2480 _g_.m.locks++ 2481 2482 // Entersyscall must not call any function that might split/grow the stack. 2483 // (See details in comment above.) 2484 // Catch calls that might, by replacing the stack guard with something that 2485 // will trip any stack check and leaving a flag to tell newstack to die. 2486 _g_.stackguard0 = stackPreempt 2487 _g_.throwsplit = true 2488 2489 // Leave SP around for GC and traceback. 
2490 save(pc, sp) 2491 _g_.syscallsp = sp 2492 _g_.syscallpc = pc 2493 casgstatus(_g_, _Grunning, _Gsyscall) 2494 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2495 systemstack(func() { 2496 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2497 throw("entersyscall") 2498 }) 2499 } 2500 2501 if trace.enabled { 2502 systemstack(traceGoSysCall) 2503 // systemstack itself clobbers g.sched.{pc,sp} and we might 2504 // need them later when the G is genuinely blocked in a 2505 // syscall 2506 save(pc, sp) 2507 } 2508 2509 if atomic.Load(&sched.sysmonwait) != 0 { 2510 systemstack(entersyscall_sysmon) 2511 save(pc, sp) 2512 } 2513 2514 if _g_.m.p.ptr().runSafePointFn != 0 { 2515 // runSafePointFn may stack split if run on this stack 2516 systemstack(runSafePointFn) 2517 save(pc, sp) 2518 } 2519 2520 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2521 _g_.sysblocktraced = true 2522 _g_.m.mcache = nil 2523 _g_.m.p.ptr().m = 0 2524 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2525 if sched.gcwaiting != 0 { 2526 systemstack(entersyscall_gcwait) 2527 save(pc, sp) 2528 } 2529 2530 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2531 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2532 // Morestack detects this case and throws. 2533 _g_.stackguard0 = stackPreempt 2534 _g_.m.locks-- 2535 } 2536 2537 // Standard syscall entry used by the go syscall library and normal cgo calls. 2538 //go:nosplit 2539 func entersyscall(dummy int32) { 2540 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2541 } 2542 2543 func entersyscall_sysmon() { 2544 lock(&sched.lock) 2545 if atomic.Load(&sched.sysmonwait) != 0 { 2546 atomic.Store(&sched.sysmonwait, 0) 2547 notewakeup(&sched.sysmonnote) 2548 } 2549 unlock(&sched.lock) 2550 } 2551 2552 func entersyscall_gcwait() { 2553 _g_ := getg() 2554 _p_ := _g_.m.p.ptr() 2555 2556 lock(&sched.lock) 2557 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2558 if trace.enabled { 2559 traceGoSysBlock(_p_) 2560 traceProcStop(_p_) 2561 } 2562 _p_.syscalltick++ 2563 if sched.stopwait--; sched.stopwait == 0 { 2564 notewakeup(&sched.stopnote) 2565 } 2566 } 2567 unlock(&sched.lock) 2568 } 2569 2570 // The same as entersyscall(), but with a hint that the syscall is blocking. 2571 //go:nosplit 2572 func entersyscallblock(dummy int32) { 2573 _g_ := getg() 2574 2575 _g_.m.locks++ // see comment in entersyscall 2576 _g_.throwsplit = true 2577 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2578 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2579 _g_.sysblocktraced = true 2580 _g_.m.p.ptr().syscalltick++ 2581 2582 // Leave SP around for GC and traceback. 
2583 pc := getcallerpc(unsafe.Pointer(&dummy)) 2584 sp := getcallersp(unsafe.Pointer(&dummy)) 2585 save(pc, sp) 2586 _g_.syscallsp = _g_.sched.sp 2587 _g_.syscallpc = _g_.sched.pc 2588 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2589 sp1 := sp 2590 sp2 := _g_.sched.sp 2591 sp3 := _g_.syscallsp 2592 systemstack(func() { 2593 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2594 throw("entersyscallblock") 2595 }) 2596 } 2597 casgstatus(_g_, _Grunning, _Gsyscall) 2598 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2599 systemstack(func() { 2600 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2601 throw("entersyscallblock") 2602 }) 2603 } 2604 2605 systemstack(entersyscallblock_handoff) 2606 2607 // Resave for traceback during blocked call. 2608 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2609 2610 _g_.m.locks-- 2611 } 2612 2613 func entersyscallblock_handoff() { 2614 if trace.enabled { 2615 traceGoSysCall() 2616 traceGoSysBlock(getg().m.p.ptr()) 2617 } 2618 handoffp(releasep()) 2619 } 2620 2621 // The goroutine g exited its system call. 2622 // Arrange for it to run on a cpu again. 2623 // This is called only from the go syscall library, not 2624 // from the low-level system calls used by the runtime. 2625 // 2626 // Write barriers are not allowed because our P may have been stolen. 2627 // 2628 //go:nosplit 2629 //go:nowritebarrierrec 2630 func exitsyscall(dummy int32) { 2631 _g_ := getg() 2632 2633 _g_.m.locks++ // see comment in entersyscall 2634 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2635 // throw calls print which may try to grow the stack, 2636 // but throwsplit == true so the stack can not be grown; 2637 // use systemstack to avoid that possible problem. 2638 systemstack(func() { 2639 throw("exitsyscall: syscall frame is no longer valid") 2640 }) 2641 } 2642 2643 _g_.waitsince = 0 2644 oldp := _g_.m.p.ptr() 2645 if exitsyscallfast() { 2646 if _g_.m.mcache == nil { 2647 throw("lost mcache") 2648 } 2649 if trace.enabled { 2650 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2651 systemstack(traceGoStart) 2652 } 2653 } 2654 // There's a cpu for us, so we can run. 2655 _g_.m.p.ptr().syscalltick++ 2656 // We need to cas the status and scan before resuming... 2657 casgstatus(_g_, _Gsyscall, _Grunning) 2658 2659 // Garbage collector isn't running (since we are), 2660 // so okay to clear syscallsp. 2661 _g_.syscallsp = 0 2662 _g_.m.locks-- 2663 if _g_.preempt { 2664 // restore the preemption request in case we've cleared it in newstack 2665 _g_.stackguard0 = stackPreempt 2666 } else { 2667 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2668 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2669 } 2670 _g_.throwsplit = false 2671 return 2672 } 2673 2674 _g_.sysexitticks = 0 2675 if trace.enabled { 2676 // Wait till traceGoSysBlock event is emitted. 2677 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2678 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2679 osyield() 2680 } 2681 // We can't trace syscall exit right now because we don't have a P. 2682 // Tracing code can invoke write barriers that cannot run without a P. 
2683 	// So instead we remember the syscall exit time and emit the event
2684 	// in execute when we have a P.
2685 	_g_.sysexitticks = cputicks()
2686 	}
2687 
2688 	_g_.m.locks--
2689 
2690 	// Call the scheduler.
2691 	mcall(exitsyscall0)
2692 
2693 	if _g_.m.mcache == nil {
2694 		throw("lost mcache")
2695 	}
2696 
2697 	// Scheduler returned, so we're allowed to run now.
2698 	// Delete the syscallsp information that we left for
2699 	// the garbage collector during the system call.
2700 	// Must wait until now because until gosched returns
2701 	// we don't know for sure that the garbage collector
2702 	// is not running.
2703 	_g_.syscallsp = 0
2704 	_g_.m.p.ptr().syscalltick++
2705 	_g_.throwsplit = false
2706 }
2707 
2708 //go:nosplit
2709 func exitsyscallfast() bool {
2710 	_g_ := getg()
2711 
2712 	// Freezetheworld sets stopwait but does not retake P's.
2713 	if sched.stopwait == freezeStopWait {
2714 		_g_.m.mcache = nil
2715 		_g_.m.p = 0
2716 		return false
2717 	}
2718 
2719 	// Try to re-acquire the last P.
2720 	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
2721 		// There's a cpu for us, so we can run.
2722 		exitsyscallfast_reacquired()
2723 		return true
2724 	}
2725 
2726 	// Try to get any other idle P.
2727 	oldp := _g_.m.p.ptr()
2728 	_g_.m.mcache = nil
2729 	_g_.m.p = 0
2730 	if sched.pidle != 0 {
2731 		var ok bool
2732 		systemstack(func() {
2733 			ok = exitsyscallfast_pidle()
2734 			if ok && trace.enabled {
2735 				if oldp != nil {
2736 					// Wait till traceGoSysBlock event is emitted.
2737 					// This ensures consistency of the trace (the goroutine is started after it is blocked).
2738 					for oldp.syscalltick == _g_.m.syscalltick {
2739 						osyield()
2740 					}
2741 				}
2742 				traceGoSysExit(0)
2743 			}
2744 		})
2745 		if ok {
2746 			return true
2747 		}
2748 	}
2749 	return false
2750 }
2751 
2752 // exitsyscallfast_reacquired is the exitsyscall path on which this G
2753 // has successfully reacquired the P it was running on before the
2754 // syscall.
2755 //
2756 // This function is allowed to have write barriers because exitsyscall
2757 // has acquired a P at this point.
2758 //
2759 //go:yeswritebarrierrec
2760 //go:nosplit
2761 func exitsyscallfast_reacquired() {
2762 	_g_ := getg()
2763 	_g_.m.mcache = _g_.m.p.ptr().mcache
2764 	_g_.m.p.ptr().m.set(_g_.m)
2765 	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2766 		if trace.enabled {
2767 			// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
2768 			// traceGoSysBlock for this syscall was already emitted,
2769 			// but here we effectively retake the p from the new syscall running on the same p.
2770 			systemstack(func() {
2771 				// Denote blocking of the new syscall.
2772 				traceGoSysBlock(_g_.m.p.ptr())
2773 				// Denote completion of the current syscall.
2774 				traceGoSysExit(0)
2775 			})
2776 		}
2777 		_g_.m.p.ptr().syscalltick++
2778 	}
2779 }
2780 
2781 func exitsyscallfast_pidle() bool {
2782 	lock(&sched.lock)
2783 	_p_ := pidleget()
2784 	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
2785 		atomic.Store(&sched.sysmonwait, 0)
2786 		notewakeup(&sched.sysmonnote)
2787 	}
2788 	unlock(&sched.lock)
2789 	if _p_ != nil {
2790 		acquirep(_p_)
2791 		return true
2792 	}
2793 	return false
2794 }
2795 
2796 // exitsyscall slow path on g0.
2797 // Failed to acquire P, enqueue gp as runnable.
2798 //
2799 //go:nowritebarrierrec
2800 func exitsyscall0(gp *g) {
2801 	_g_ := getg()
2802 
2803 	casgstatus(gp, _Gsyscall, _Grunnable)
2804 	dropg()
2805 	lock(&sched.lock)
2806 	_p_ := pidleget()
2807 	if _p_ == nil {
2808 		globrunqput(gp)
2809 	} else if atomic.Load(&sched.sysmonwait) != 0 {
2810 		atomic.Store(&sched.sysmonwait, 0)
2811 		notewakeup(&sched.sysmonnote)
2812 	}
2813 	unlock(&sched.lock)
2814 	if _p_ != nil {
2815 		acquirep(_p_)
2816 		execute(gp, false) // Never returns.
2817 	}
2818 	if _g_.m.lockedg != nil {
2819 		// Wait until another thread schedules gp and so this m again.
2820 		stoplockedm()
2821 		execute(gp, false) // Never returns.
2822 	}
2823 	stopm()
2824 	schedule() // Never returns.
2825 }
2826 
2827 func beforefork() {
2828 	gp := getg().m.curg
2829 
2830 	// Block signals during a fork, so that the child does not run
2831 	// a signal handler before exec if a signal is sent to the process
2832 	// group. See issue #18600.
2833 	gp.m.locks++
2834 	msigsave(gp.m)
2835 	sigblock()
2836 
2837 	// This function is called before fork in syscall package.
2838 	// Code between fork and exec must not allocate memory nor even try to grow stack.
2839 	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
2840 	// runtime_AfterFork will undo this in parent process, but not in child.
2841 	gp.stackguard0 = stackFork
2842 }
2843 
2844 // Called from syscall package before fork.
2845 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
2846 //go:nosplit
2847 func syscall_runtime_BeforeFork() {
2848 	systemstack(beforefork)
2849 }
2850 
2851 func afterfork() {
2852 	gp := getg().m.curg
2853 
2854 	// See the comments in beforefork.
2855 	gp.stackguard0 = gp.stack.lo + _StackGuard
2856 
2857 	msigrestore(gp.m.sigmask)
2858 
2859 	gp.m.locks--
2860 }
2861 
2862 // Called from syscall package after fork in parent.
2863 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
2864 //go:nosplit
2865 func syscall_runtime_AfterFork() {
2866 	systemstack(afterfork)
2867 }
2868 
2869 // inForkedChild is true while manipulating signals in the child process.
2870 // This is used to avoid calling libc functions in case we are using vfork.
2871 var inForkedChild bool
2872 
2873 // Called from syscall package after fork in child.
2874 // It resets non-sigignored signals to the default handler, and
2875 // restores the signal mask in preparation for the exec.
2876 //
2877 // Because this might be called during a vfork, and therefore may be
2878 // temporarily sharing address space with the parent process, this must
2879 // not change any global variables or call into C code that may do so.
2880 //
2881 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
2882 //go:nosplit
2883 //go:nowritebarrierrec
2884 func syscall_runtime_AfterForkInChild() {
2885 	// It's OK to change the global variable inForkedChild here
2886 	// because we are going to change it back. There is no race here,
2887 	// because if we are sharing address space with the parent process,
2888 	// then the parent process can not be running concurrently.
2889 	inForkedChild = true
2890 
2891 	clearSignalHandlers()
2892 
2893 	// When we are the child we are the only thread running,
2894 	// so we know that nothing else has changed gp.m.sigmask.
2895 	msigrestore(getg().m.sigmask)
2896 
2897 	inForkedChild = false
2898 }
2899 
2900 // Called from syscall package before Exec.
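// Aside (not part of this file): execLock, taken for reading in newm and
// for writing in syscall_runtime_BeforeExec just below, lets user code exec
// child processes while other goroutines force new threads into existence;
// the runtime serializes the two. A small sketch, assuming a Unix-like
// system with a `true` binary on PATH:
//
//	package main
//
//	import (
//		"fmt"
//		"os/exec"
//		"sync"
//	)
//
//	func main() {
//		var wg sync.WaitGroup
//		for i := 0; i < 4; i++ {
//			wg.Add(1)
//			go func() { // may start new threads (newm) in the runtime
//				defer wg.Done()
//				if err := exec.Command("true").Run(); err != nil {
//					fmt.Println("exec:", err)
//				}
//			}()
//		}
//		wg.Wait()
//		fmt.Println("done")
//	}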
2901 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 2902 func syscall_runtime_BeforeExec() { 2903 // Prevent thread creation during exec. 2904 execLock.lock() 2905 } 2906 2907 // Called from syscall package after Exec. 2908 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 2909 func syscall_runtime_AfterExec() { 2910 execLock.unlock() 2911 } 2912 2913 // Allocate a new g, with a stack big enough for stacksize bytes. 2914 func malg(stacksize int32) *g { 2915 newg := new(g) 2916 if stacksize >= 0 { 2917 stacksize = round2(_StackSystem + stacksize) 2918 systemstack(func() { 2919 newg.stack = stackalloc(uint32(stacksize)) 2920 }) 2921 newg.stackguard0 = newg.stack.lo + _StackGuard 2922 newg.stackguard1 = ^uintptr(0) 2923 } 2924 return newg 2925 } 2926 2927 // Create a new g running fn with siz bytes of arguments. 2928 // Put it on the queue of g's waiting to run. 2929 // The compiler turns a go statement into a call to this. 2930 // Cannot split the stack because it assumes that the arguments 2931 // are available sequentially after &fn; they would not be 2932 // copied if a stack split occurred. 2933 //go:nosplit 2934 func newproc(siz int32, fn *funcval) { 2935 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2936 pc := getcallerpc(unsafe.Pointer(&siz)) 2937 systemstack(func() { 2938 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2939 }) 2940 } 2941 2942 // Create a new g running fn with narg bytes of arguments starting 2943 // at argp and returning nret bytes of results. callerpc is the 2944 // address of the go statement that created this. The new g is put 2945 // on the queue of g's waiting to run. 2946 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2947 _g_ := getg() 2948 2949 if fn == nil { 2950 _g_.m.throwing = -1 // do not dump full stacks 2951 throw("go of nil func value") 2952 } 2953 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2954 siz := narg + nret 2955 siz = (siz + 7) &^ 7 2956 2957 // We could allocate a larger initial stack if necessary. 2958 // Not worth it: this is almost always an error. 2959 // 4*sizeof(uintreg): extra space added below 2960 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2961 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2962 throw("newproc: function arguments too large for new goroutine") 2963 } 2964 2965 _p_ := _g_.m.p.ptr() 2966 newg := gfget(_p_) 2967 if newg == nil { 2968 newg = malg(_StackMin) 2969 casgstatus(newg, _Gidle, _Gdead) 2970 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2971 } 2972 if newg.stack.hi == 0 { 2973 throw("newproc1: newg missing stack") 2974 } 2975 2976 if readgstatus(newg) != _Gdead { 2977 throw("newproc1: new g is not Gdead") 2978 } 2979 2980 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2981 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2982 sp := newg.stack.hi - totalSize 2983 spArg := sp 2984 if usesLR { 2985 // caller's LR 2986 *(*uintptr)(unsafe.Pointer(sp)) = 0 2987 prepGoExitFrame(sp) 2988 spArg += sys.MinFrameSize 2989 } 2990 if narg > 0 { 2991 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2992 // This is a stack-to-stack copy. If write barriers 2993 // are enabled and the source stack is grey (the 2994 // destination is always black), then perform a 2995 // barrier copy. 
We do this *after* the memmove 2996 // because the destination stack may have garbage on 2997 // it. 2998 if writeBarrier.needed && !_g_.m.curg.gcscandone { 2999 f := findfunc(fn.fn) 3000 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 3001 // We're in the prologue, so it's always stack map index 0. 3002 bv := stackmapdata(stkmap, 0) 3003 bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata) 3004 } 3005 } 3006 3007 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 3008 newg.sched.sp = sp 3009 newg.stktopsp = sp 3010 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 3011 newg.sched.g = guintptr(unsafe.Pointer(newg)) 3012 gostartcallfn(&newg.sched, fn) 3013 newg.gopc = callerpc 3014 newg.startpc = fn.fn 3015 if _g_.m.curg != nil { 3016 newg.labels = _g_.m.curg.labels 3017 } 3018 if isSystemGoroutine(newg) { 3019 atomic.Xadd(&sched.ngsys, +1) 3020 } 3021 newg.gcscanvalid = false 3022 casgstatus(newg, _Gdead, _Grunnable) 3023 3024 if _p_.goidcache == _p_.goidcacheend { 3025 // Sched.goidgen is the last allocated id, 3026 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 3027 // At startup sched.goidgen=0, so main goroutine receives goid=1. 3028 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 3029 _p_.goidcache -= _GoidCacheBatch - 1 3030 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 3031 } 3032 newg.goid = int64(_p_.goidcache) 3033 _p_.goidcache++ 3034 if raceenabled { 3035 newg.racectx = racegostart(callerpc) 3036 } 3037 if trace.enabled { 3038 traceGoCreate(newg, newg.startpc) 3039 } 3040 runqput(_p_, newg, true) 3041 3042 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { 3043 wakep() 3044 } 3045 _g_.m.locks-- 3046 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 3047 _g_.stackguard0 = stackPreempt 3048 } 3049 return newg 3050 } 3051 3052 // Put on gfree list. 3053 // If local list is too long, transfer a batch to the global list. 3054 func gfput(_p_ *p, gp *g) { 3055 if readgstatus(gp) != _Gdead { 3056 throw("gfput: bad status (not Gdead)") 3057 } 3058 3059 stksize := gp.stack.hi - gp.stack.lo 3060 3061 if stksize != _FixedStack { 3062 // non-standard stack size - free it. 3063 stackfree(gp.stack) 3064 gp.stack.lo = 0 3065 gp.stack.hi = 0 3066 gp.stackguard0 = 0 3067 } 3068 3069 gp.schedlink.set(_p_.gfree) 3070 _p_.gfree = gp 3071 _p_.gfreecnt++ 3072 if _p_.gfreecnt >= 64 { 3073 lock(&sched.gflock) 3074 for _p_.gfreecnt >= 32 { 3075 _p_.gfreecnt-- 3076 gp = _p_.gfree 3077 _p_.gfree = gp.schedlink.ptr() 3078 if gp.stack.lo == 0 { 3079 gp.schedlink.set(sched.gfreeNoStack) 3080 sched.gfreeNoStack = gp 3081 } else { 3082 gp.schedlink.set(sched.gfreeStack) 3083 sched.gfreeStack = gp 3084 } 3085 sched.ngfree++ 3086 } 3087 unlock(&sched.gflock) 3088 } 3089 } 3090 3091 // Get from gfree list. 3092 // If local list is empty, grab a batch from global list. 3093 func gfget(_p_ *p) *g { 3094 retry: 3095 gp := _p_.gfree 3096 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 3097 lock(&sched.gflock) 3098 for _p_.gfreecnt < 32 { 3099 if sched.gfreeStack != nil { 3100 // Prefer Gs with stacks. 
3101 				gp = sched.gfreeStack
3102 				sched.gfreeStack = gp.schedlink.ptr()
3103 			} else if sched.gfreeNoStack != nil {
3104 				gp = sched.gfreeNoStack
3105 				sched.gfreeNoStack = gp.schedlink.ptr()
3106 			} else {
3107 				break
3108 			}
3109 			_p_.gfreecnt++
3110 			sched.ngfree--
3111 			gp.schedlink.set(_p_.gfree)
3112 			_p_.gfree = gp
3113 		}
3114 		unlock(&sched.gflock)
3115 		goto retry
3116 	}
3117 	if gp != nil {
3118 		_p_.gfree = gp.schedlink.ptr()
3119 		_p_.gfreecnt--
3120 		if gp.stack.lo == 0 {
3121 			// Stack was deallocated in gfput. Allocate a new one.
3122 			systemstack(func() {
3123 				gp.stack = stackalloc(_FixedStack)
3124 			})
3125 			gp.stackguard0 = gp.stack.lo + _StackGuard
3126 		} else {
3127 			if raceenabled {
3128 				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3129 			}
3130 			if msanenabled {
3131 				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3132 			}
3133 		}
3134 	}
3135 	return gp
3136 }
3137 
3138 // Purge all cached G's from gfree list to the global list.
3139 func gfpurge(_p_ *p) {
3140 	lock(&sched.gflock)
3141 	for _p_.gfreecnt != 0 {
3142 		_p_.gfreecnt--
3143 		gp := _p_.gfree
3144 		_p_.gfree = gp.schedlink.ptr()
3145 		if gp.stack.lo == 0 {
3146 			gp.schedlink.set(sched.gfreeNoStack)
3147 			sched.gfreeNoStack = gp
3148 		} else {
3149 			gp.schedlink.set(sched.gfreeStack)
3150 			sched.gfreeStack = gp
3151 		}
3152 		sched.ngfree++
3153 	}
3154 	unlock(&sched.gflock)
3155 }
3156 
3157 // Breakpoint executes a breakpoint trap.
3158 func Breakpoint() {
3159 	breakpoint()
3160 }
3161 
3162 // dolockOSThread is called by LockOSThread and lockOSThread below
3163 // after they modify m.locked. Do not allow preemption during this call,
3164 // or else the m might be different in this function than in the caller.
3165 //go:nosplit
3166 func dolockOSThread() {
3167 	_g_ := getg()
3168 	_g_.m.lockedg = _g_
3169 	_g_.lockedm = _g_.m
3170 }
3171 
3172 //go:nosplit
3173 
3174 // LockOSThread wires the calling goroutine to its current operating system thread.
3175 // Until the calling goroutine exits or calls UnlockOSThread, it will always
3176 // execute in that thread, and no other goroutine can.
3177 func LockOSThread() {
3178 	getg().m.locked |= _LockExternal
3179 	dolockOSThread()
3180 }
3181 
3182 //go:nosplit
3183 func lockOSThread() {
3184 	getg().m.locked += _LockInternal
3185 	dolockOSThread()
3186 }
3187 
3188 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3189 // after they update m->locked. Do not allow preemption during this call,
3190 // or else the m might be different in this function than in the caller.
3191 //go:nosplit
3192 func dounlockOSThread() {
3193 	_g_ := getg()
3194 	if _g_.m.locked != 0 {
3195 		return
3196 	}
3197 	_g_.m.lockedg = nil
3198 	_g_.lockedm = nil
3199 }
3200 
3201 //go:nosplit
3202 
3203 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
3204 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
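// Aside (not part of this file): a typical use of the pair above from user
// code. Libraries that rely on thread-local state (GUI event loops, some GL
// bindings) pin the goroutine to one OS thread:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		fmt.Println("this goroutine is wired to its OS thread")
//	}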
3205 func UnlockOSThread() {
3206 	getg().m.locked &^= _LockExternal
3207 	dounlockOSThread()
3208 }
3209 
3210 //go:nosplit
3211 func unlockOSThread() {
3212 	_g_ := getg()
3213 	if _g_.m.locked < _LockInternal {
3214 		systemstack(badunlockosthread)
3215 	}
3216 	_g_.m.locked -= _LockInternal
3217 	dounlockOSThread()
3218 }
3219 
3220 func badunlockosthread() {
3221 	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3222 }
3223 
3224 func gcount() int32 {
3225 	n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3226 	for _, _p_ := range &allp {
3227 		if _p_ == nil {
3228 			break
3229 		}
3230 		n -= _p_.gfreecnt
3231 	}
3232 
3233 	// All these variables can be changed concurrently, so the result can be inconsistent.
3234 	// But at least the current goroutine is running.
3235 	if n < 1 {
3236 		n = 1
3237 	}
3238 	return n
3239 }
3240 
3241 func mcount() int32 {
3242 	return sched.mcount
3243 }
3244 
3245 var prof struct {
3246 	signalLock uint32
3247 	hz         int32
3248 }
3249 
3250 func _System()                    { _System() }
3251 func _ExternalCode()              { _ExternalCode() }
3252 func _LostExternalCode()          { _LostExternalCode() }
3253 func _GC()                        { _GC() }
3254 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3255 
3256 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
3257 var lostAtomic64Count uint64
3258 
3259 // Called if we receive a SIGPROF signal.
3260 // Called by the signal handler, may run during STW.
3261 //go:nowritebarrierrec
3262 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
3263 	if prof.hz == 0 {
3264 		return
3265 	}
3266 
3267 	// On mips{,le}, 64-bit atomics are emulated with spinlocks, in
3268 	// runtime/internal/atomic. If SIGPROF arrives while the program is inside
3269 	// the critical section, it creates a deadlock (when writing the sample).
3270 	// As a workaround, create a counter of SIGPROFs while in critical section
3271 	// to store the count, and pass it to sigprof.add() later when SIGPROF is
3272 	// received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc).
3273 	if GOARCH == "mips" || GOARCH == "mipsle" {
3274 		if f := findfunc(pc); f.valid() {
3275 			if hasprefix(funcname(f), "runtime/internal/atomic") {
3276 				lostAtomic64Count++
3277 				return
3278 			}
3279 		}
3280 	}
3281 
3282 	// Profiling runs concurrently with GC, so it must not allocate.
3283 	// Set a trap in case the code does allocate.
3284 	// Note that on windows, one thread takes profiles of all the
3285 	// other threads, so mp is usually not getg().m.
3286 	// In fact mp may not even be stopped.
3287 	// See golang.org/issue/17165.
3288 	getg().m.mallocing++
3289 
3290 	// Define that a "user g" is a user-created goroutine, and a "system g"
3291 	// is one that is m->g0 or m->gsignal.
3292 	//
3293 	// We might be interrupted for profiling halfway through a
3294 	// goroutine switch. The switch involves updating three (or four) values:
3295 	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
3296 	// because once it gets updated the new g is running.
3297 	//
3298 	// When switching from a user g to a system g, LR is not considered live,
3299 	// so the update only affects g, SP, and PC. Since PC must be last, the
3300 	// possible partial transitions in ordinary execution are (1) g alone is updated,
3301 	// (2) both g and SP are updated, and (3) SP alone is updated.
3302 	// If SP or g alone is updated, we can detect the partial transition by checking
3303 	// whether the SP is within g's stack bounds.
(We could also require that SP 3304 // be changed only after g, but the stack bounds check is needed by other 3305 // cases, so there is no need to impose an additional requirement.) 3306 // 3307 // There is one exceptional transition to a system g, not in ordinary execution. 3308 // When a signal arrives, the operating system starts the signal handler running 3309 // with an updated PC and SP. The g is updated last, at the beginning of the 3310 // handler. There are two reasons this is okay. First, until g is updated the 3311 // g and SP do not match, so the stack bounds check detects the partial transition. 3312 // Second, signal handlers currently run with signals disabled, so a profiling 3313 // signal cannot arrive during the handler. 3314 // 3315 // When switching from a system g to a user g, there are three possibilities. 3316 // 3317 // First, it may be that the g switch has no PC update, because the SP 3318 // either corresponds to a user g throughout (as in asmcgocall) 3319 // or because it has been arranged to look like a user g frame 3320 // (as in cgocallback_gofunc). In this case, since the entire 3321 // transition is a g+SP update, a partial transition updating just one of 3322 // those will be detected by the stack bounds check. 3323 // 3324 // Second, when returning from a signal handler, the PC and SP updates 3325 // are performed by the operating system in an atomic update, so the g 3326 // update must be done before them. The stack bounds check detects 3327 // the partial transition here, and (again) signal handlers run with signals 3328 // disabled, so a profiling signal cannot arrive then anyway. 3329 // 3330 // Third, the common case: it may be that the switch updates g, SP, and PC 3331 // separately. If the PC is within any of the functions that does this, 3332 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3333 // 3334 // There is another apparently viable approach, recorded here in case 3335 // the "PC within setsSP function" check turns out not to be usable. 3336 // It would be possible to delay the update of either g or SP until immediately 3337 // before the PC update instruction. Then, because of the stack bounds check, 3338 // the only problematic interrupt point is just before that PC update instruction, 3339 // and the sigprof handler can detect that instruction and simulate stepping past 3340 // it in order to reach a consistent state. On ARM, the update of g must be made 3341 // in two places (in R10 and also in a TLS slot), so the delayed update would 3342 // need to be the SP update. The sigprof handler must read the instruction at 3343 // the current PC and if it was the known instruction (for example, JMP BX or 3344 // MOV R2, PC), use that other register in place of the PC value. 3345 // The biggest drawback to this solution is that it requires that we can tell 3346 // whether it's safe to read from the memory pointed at by PC. 3347 // In a correct program, we can test PC == nil and otherwise read, 3348 // but if a profiling signal happens at the instant that a program executes 3349 // a bad jump (before the program manages to handle the resulting fault) 3350 // the profiling handler could fault trying to read nonexistent memory. 3351 // 3352 // To recap, there are no constraints on the assembly being used for the 3353 // transition. We simply require that g and SP match and that the PC is not 3354 // in gogo. 
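// Aside (not part of this file): sigprof fires once per SIGPROF delivered
// while profiling is on. From user code, the usual way to switch that
// machinery on is runtime/pprof; a minimal sketch, in which the output file
// name and the workload are hypothetical:
//
//	package main
//
//	import (
//		"os"
//		"runtime/pprof"
//	)
//
//	func main() {
//		f, err := os.Create("cpu.out")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//		if err := pprof.StartCPUProfile(f); err != nil {
//			panic(err)
//		}
//		defer pprof.StopCPUProfile()
//		busy()
//	}
//
//	// busy burns CPU so the profile has samples to collect.
//	func busy() {
//		n := 0
//		for i := 0; i < 1e8; i++ {
//			n += i
//		}
//		_ = n
//	}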
3355 traceback := true 3356 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3357 traceback = false 3358 } 3359 var stk [maxCPUProfStack]uintptr 3360 n := 0 3361 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3362 cgoOff := 0 3363 // Check cgoCallersUse to make sure that we are not 3364 // interrupting other code that is fiddling with 3365 // cgoCallers. We are running in a signal handler 3366 // with all signals blocked, so we don't have to worry 3367 // about any other code interrupting us. 3368 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3369 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3370 cgoOff++ 3371 } 3372 copy(stk[:], mp.cgoCallers[:cgoOff]) 3373 mp.cgoCallers[0] = 0 3374 } 3375 3376 // Collect Go stack that leads to the cgo call. 3377 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3378 } else if traceback { 3379 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3380 } 3381 3382 if n <= 0 { 3383 // Normal traceback is impossible or has failed. 3384 // See if it falls into several common cases. 3385 n = 0 3386 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3387 // Libcall, i.e. runtime syscall on windows. 3388 // Collect Go stack that leads to the call. 3389 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3390 } 3391 if n == 0 { 3392 // If all of the above has failed, account it against abstract "System" or "GC". 3393 n = 2 3394 // "ExternalCode" is better than "etext". 3395 if pc > firstmoduledata.etext { 3396 pc = funcPC(_ExternalCode) + sys.PCQuantum 3397 } 3398 stk[0] = pc 3399 if mp.preemptoff != "" || mp.helpgc != 0 { 3400 stk[1] = funcPC(_GC) + sys.PCQuantum 3401 } else { 3402 stk[1] = funcPC(_System) + sys.PCQuantum 3403 } 3404 } 3405 } 3406 3407 if prof.hz != 0 { 3408 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 { 3409 cpuprof.addLostAtomic64(lostAtomic64Count) 3410 lostAtomic64Count = 0 3411 } 3412 cpuprof.add(gp, stk[:n]) 3413 } 3414 getg().m.mallocing-- 3415 } 3416 3417 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3418 // it tries to collect a traceback into sigprofCallers. 3419 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3420 var sigprofCallers cgoCallers 3421 var sigprofCallersUse uint32 3422 3423 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3424 // and the signal handler collected a stack trace in sigprofCallers. 3425 // When this is called, sigprofCallersUse will be non-zero. 3426 // g is nil, and what we can do is very limited. 3427 //go:nosplit 3428 //go:nowritebarrierrec 3429 func sigprofNonGo() { 3430 if prof.hz != 0 { 3431 n := 0 3432 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3433 n++ 3434 } 3435 cpuprof.addNonGo(sigprofCallers[:n]) 3436 } 3437 3438 atomic.Store(&sigprofCallersUse, 0) 3439 } 3440 3441 // sigprofNonGoPC is called when a profiling signal arrived on a 3442 // non-Go thread and we have a single PC value, not a stack trace. 3443 // g is nil, and what we can do is very limited. 
3444 //go:nosplit 3445 //go:nowritebarrierrec 3446 func sigprofNonGoPC(pc uintptr) { 3447 if prof.hz != 0 { 3448 stk := []uintptr{ 3449 pc, 3450 funcPC(_ExternalCode) + sys.PCQuantum, 3451 } 3452 cpuprof.addNonGo(stk) 3453 } 3454 } 3455 3456 // Reports whether a function will set the SP 3457 // to an absolute value. Important that 3458 // we don't traceback when these are at the bottom 3459 // of the stack since we can't be sure that we will 3460 // find the caller. 3461 // 3462 // If the function is not on the bottom of the stack 3463 // we assume that it will have set it up so that traceback will be consistent, 3464 // either by being a traceback terminating function 3465 // or putting one on the stack at the right offset. 3466 func setsSP(pc uintptr) bool { 3467 f := findfunc(pc) 3468 if !f.valid() { 3469 // couldn't find the function for this PC, 3470 // so assume the worst and stop traceback 3471 return true 3472 } 3473 switch f.entry { 3474 case gogoPC, systemstackPC, mcallPC, morestackPC: 3475 return true 3476 } 3477 return false 3478 } 3479 3480 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3481 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3482 func setcpuprofilerate(hz int32) { 3483 // Force sane arguments. 3484 if hz < 0 { 3485 hz = 0 3486 } 3487 3488 // Disable preemption, otherwise we can be rescheduled to another thread 3489 // that has profiling enabled. 3490 _g_ := getg() 3491 _g_.m.locks++ 3492 3493 // Stop profiler on this thread so that it is safe to lock prof. 3494 // if a profiling signal came in while we had prof locked, 3495 // it would deadlock. 3496 setThreadCPUProfiler(0) 3497 3498 for !atomic.Cas(&prof.signalLock, 0, 1) { 3499 osyield() 3500 } 3501 if prof.hz != hz { 3502 setProcessCPUProfiler(hz) 3503 prof.hz = hz 3504 } 3505 atomic.Store(&prof.signalLock, 0) 3506 3507 lock(&sched.lock) 3508 sched.profilehz = hz 3509 unlock(&sched.lock) 3510 3511 if hz != 0 { 3512 setThreadCPUProfiler(hz) 3513 } 3514 3515 _g_.m.locks-- 3516 } 3517 3518 // Change number of processors. The world is stopped, sched is locked. 3519 // gcworkbufs are not being modified by either the GC or 3520 // the write barrier code. 3521 // Returns list of Ps with local work, they need to be scheduled by the caller. 
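// Aside (not part of this file): procresize below is reached from user code
// via runtime.GOMAXPROCS, which stops the world, resizes the set of Ps, and
// restarts it. For example:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		prev := runtime.GOMAXPROCS(4) // world is stopped while Ps resize
//		fmt.Println("previous GOMAXPROCS:", prev)
//		fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0)) // 0 only queries
//	}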
3522 func procresize(nprocs int32) *p { 3523 old := gomaxprocs 3524 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3525 throw("procresize: invalid arg") 3526 } 3527 if trace.enabled { 3528 traceGomaxprocs(nprocs) 3529 } 3530 3531 // update statistics 3532 now := nanotime() 3533 if sched.procresizetime != 0 { 3534 sched.totaltime += int64(old) * (now - sched.procresizetime) 3535 } 3536 sched.procresizetime = now 3537 3538 // initialize new P's 3539 for i := int32(0); i < nprocs; i++ { 3540 pp := allp[i] 3541 if pp == nil { 3542 pp = new(p) 3543 pp.id = i 3544 pp.status = _Pgcstop 3545 pp.sudogcache = pp.sudogbuf[:0] 3546 for i := range pp.deferpool { 3547 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3548 } 3549 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3550 } 3551 if pp.mcache == nil { 3552 if old == 0 && i == 0 { 3553 if getg().m.mcache == nil { 3554 throw("missing mcache?") 3555 } 3556 pp.mcache = getg().m.mcache // bootstrap 3557 } else { 3558 pp.mcache = allocmcache() 3559 } 3560 } 3561 if raceenabled && pp.racectx == 0 { 3562 if old == 0 && i == 0 { 3563 pp.racectx = raceprocctx0 3564 raceprocctx0 = 0 // bootstrap 3565 } else { 3566 pp.racectx = raceproccreate() 3567 } 3568 } 3569 } 3570 3571 // free unused P's 3572 for i := nprocs; i < old; i++ { 3573 p := allp[i] 3574 if trace.enabled && p == getg().m.p.ptr() { 3575 // moving to p[0], pretend that we were descheduled 3576 // and then scheduled again to keep the trace sane. 3577 traceGoSched() 3578 traceProcStop(p) 3579 } 3580 // move all runnable goroutines to the global queue 3581 for p.runqhead != p.runqtail { 3582 // pop from tail of local queue 3583 p.runqtail-- 3584 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3585 // push onto head of global queue 3586 globrunqputhead(gp) 3587 } 3588 if p.runnext != 0 { 3589 globrunqputhead(p.runnext.ptr()) 3590 p.runnext = 0 3591 } 3592 // if there's a background worker, make it runnable and put 3593 // it on the global queue so it can clean itself up 3594 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3595 casgstatus(gp, _Gwaiting, _Grunnable) 3596 if trace.enabled { 3597 traceGoUnpark(gp, 0) 3598 } 3599 globrunqput(gp) 3600 // This assignment doesn't race because the 3601 // world is stopped. 
3602 p.gcBgMarkWorker.set(nil) 3603 } 3604 for i := range p.sudogbuf { 3605 p.sudogbuf[i] = nil 3606 } 3607 p.sudogcache = p.sudogbuf[:0] 3608 for i := range p.deferpool { 3609 for j := range p.deferpoolbuf[i] { 3610 p.deferpoolbuf[i][j] = nil 3611 } 3612 p.deferpool[i] = p.deferpoolbuf[i][:0] 3613 } 3614 freemcache(p.mcache) 3615 p.mcache = nil 3616 gfpurge(p) 3617 traceProcFree(p) 3618 if raceenabled { 3619 raceprocdestroy(p.racectx) 3620 p.racectx = 0 3621 } 3622 p.status = _Pdead 3623 // can't free P itself because it can be referenced by an M in syscall 3624 } 3625 3626 _g_ := getg() 3627 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3628 // continue to use the current P 3629 _g_.m.p.ptr().status = _Prunning 3630 } else { 3631 // release the current P and acquire allp[0] 3632 if _g_.m.p != 0 { 3633 _g_.m.p.ptr().m = 0 3634 } 3635 _g_.m.p = 0 3636 _g_.m.mcache = nil 3637 p := allp[0] 3638 p.m = 0 3639 p.status = _Pidle 3640 acquirep(p) 3641 if trace.enabled { 3642 traceGoStart() 3643 } 3644 } 3645 var runnablePs *p 3646 for i := nprocs - 1; i >= 0; i-- { 3647 p := allp[i] 3648 if _g_.m.p.ptr() == p { 3649 continue 3650 } 3651 p.status = _Pidle 3652 if runqempty(p) { 3653 pidleput(p) 3654 } else { 3655 p.m.set(mget()) 3656 p.link.set(runnablePs) 3657 runnablePs = p 3658 } 3659 } 3660 stealOrder.reset(uint32(nprocs)) 3661 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3662 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3663 return runnablePs 3664 } 3665 3666 // Associate p and the current m. 3667 // 3668 // This function is allowed to have write barriers even if the caller 3669 // isn't because it immediately acquires _p_. 3670 // 3671 //go:yeswritebarrierrec 3672 func acquirep(_p_ *p) { 3673 // Do the part that isn't allowed to have write barriers. 3674 acquirep1(_p_) 3675 3676 // have p; write barriers now allowed 3677 _g_ := getg() 3678 _g_.m.mcache = _p_.mcache 3679 3680 if trace.enabled { 3681 traceProcStart() 3682 } 3683 } 3684 3685 // acquirep1 is the first step of acquirep, which actually acquires 3686 // _p_. This is broken out so we can disallow write barriers for this 3687 // part, since we don't yet have a P. 3688 // 3689 //go:nowritebarrierrec 3690 func acquirep1(_p_ *p) { 3691 _g_ := getg() 3692 3693 if _g_.m.p != 0 || _g_.m.mcache != nil { 3694 throw("acquirep: already in go") 3695 } 3696 if _p_.m != 0 || _p_.status != _Pidle { 3697 id := int32(0) 3698 if _p_.m != 0 { 3699 id = _p_.m.ptr().id 3700 } 3701 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3702 throw("acquirep: invalid p state") 3703 } 3704 _g_.m.p.set(_p_) 3705 _p_.m.set(_g_.m) 3706 _p_.status = _Prunning 3707 } 3708 3709 // Disassociate p and the current m. 
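//
// Editorial note, not part of the original source: releasep is the inverse of
// acquirep above, and the two are used in pairs. One pattern elsewhere in this
// file hands the freed P straight to another M, schematically:
//
//	handoffp(releasep()) // detach the P from this M and pass it on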
3710 func releasep() *p {
3711 	_g_ := getg()
3712
3713 	if _g_.m.p == 0 || _g_.m.mcache == nil {
3714 		throw("releasep: invalid arg")
3715 	}
3716 	_p_ := _g_.m.p.ptr()
3717 	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
3718 		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
3719 		throw("releasep: invalid p state")
3720 	}
3721 	if trace.enabled {
3722 		traceProcStop(_g_.m.p.ptr())
3723 	}
3724 	_g_.m.p = 0
3725 	_g_.m.mcache = nil
3726 	_p_.m = 0
3727 	_p_.status = _Pidle
3728 	return _p_
3729 }
3730
3731 func incidlelocked(v int32) {
3732 	lock(&sched.lock)
3733 	sched.nmidlelocked += v
3734 	if v > 0 {
3735 		checkdead()
3736 	}
3737 	unlock(&sched.lock)
3738 }
3739
3740 // Check for a deadlock situation.
3741 // The check is based on the number of running M's; if it is 0, we have a deadlock.
3742 func checkdead() {
3743 	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
3744 	// there are no running goroutines. The calling program is
3745 	// assumed to be running.
3746 	if islibrary || isarchive {
3747 		return
3748 	}
3749
3750 	// If we are dying because of a signal caught on an already idle thread,
3751 	// freezetheworld will cause all running threads to block.
3752 	// The runtime will then essentially enter a deadlock state,
3753 	// except that there is a thread that will call exit soon.
3754 	if panicking > 0 {
3755 		return
3756 	}
3757
3758 	// -1 for sysmon
3759 	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
3760 	if run > 0 {
3761 		return
3762 	}
3763 	if run < 0 {
3764 		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
3765 		throw("checkdead: inconsistent counts")
3766 	}
3767
3768 	grunning := 0
3769 	lock(&allglock)
3770 	for i := 0; i < len(allgs); i++ {
3771 		gp := allgs[i]
3772 		if isSystemGoroutine(gp) {
3773 			continue
3774 		}
3775 		s := readgstatus(gp)
3776 		switch s &^ _Gscan {
3777 		case _Gwaiting:
3778 			grunning++
3779 		case _Grunnable,
3780 			_Grunning,
3781 			_Gsyscall:
3782 			unlock(&allglock)
3783 			print("runtime: checkdead: found g ", gp.goid, " in status ", s, "\n")
3784 			throw("checkdead: runnable g")
3785 		}
3786 	}
3787 	unlock(&allglock)
3788 	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
3789 		throw("no goroutines (main called runtime.Goexit) - deadlock!")
3790 	}
3791
3792 	// Maybe jump time forward for playground.
3793 	gp := timejump()
3794 	if gp != nil {
3795 		casgstatus(gp, _Gwaiting, _Grunnable)
3796 		globrunqput(gp)
3797 		_p_ := pidleget()
3798 		if _p_ == nil {
3799 			throw("checkdead: no p for timer")
3800 		}
3801 		mp := mget()
3802 		if mp == nil {
3803 			// There should always be a free M since
3804 			// nothing is running.
3805 			throw("checkdead: no m for timer")
3806 		}
3807 		mp.nextp.set(_p_)
3808 		notewakeup(&mp.park)
3809 		return
3810 	}
3811
3812 	getg().m.throwing = -1 // do not dump full stacks
3813 	throw("all goroutines are asleep - deadlock!")
3814 }
3815
3816 // forcegcperiod is the maximum time in nanoseconds between garbage
3817 // collections. If we go this long without a garbage collection, one
3818 // is forced to run.
3819 //
3820 // This is a variable for testing purposes. It normally doesn't change.
3821 var forcegcperiod int64 = 2 * 60 * 1e9
3822
3823 // sysmon always runs without a P, so write barriers are not allowed.
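//
// Editorial summary, not part of the original source: each iteration of
// sysmon's loop below, after an adaptive 20us-10ms sleep, polls the network
// if it has not been polled for more than 10ms, retakes Ps blocked in
// syscalls and preempts long-running Gs, forces a GC if none has run for
// forcegcperiod, and scavenges unused spans every scavengelimit/2.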
3824 //
3825 //go:nowritebarrierrec
3826 func sysmon() {
3827 	// If a heap span goes unused for 5 minutes after a garbage collection,
3828 	// we hand it back to the operating system.
3829 	scavengelimit := int64(5 * 60 * 1e9)
3830
3831 	if debug.scavenge > 0 {
3832 		// Scavenge-a-lot for testing.
3833 		forcegcperiod = 10 * 1e6
3834 		scavengelimit = 20 * 1e6
3835 	}
3836
3837 	lastscavenge := nanotime()
3838 	nscavenge := 0
3839
3840 	lasttrace := int64(0)
3841 	idle := 0 // how many cycles in succession we have not woken anybody up
3842 	delay := uint32(0)
3843 	for {
3844 		if idle == 0 { // start with 20us sleep...
3845 			delay = 20
3846 		} else if idle > 50 { // start doubling the sleep after 1ms...
3847 			delay *= 2
3848 		}
3849 		if delay > 10*1000 { // up to 10ms
3850 			delay = 10 * 1000
3851 		}
3852 		usleep(delay)
3853 		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
3854 			lock(&sched.lock)
3855 			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3856 				atomic.Store(&sched.sysmonwait, 1)
3857 				unlock(&sched.lock)
3858 				// Make the wake-up period small enough
3859 				// for the sampling to be correct.
3860 				maxsleep := forcegcperiod / 2
3861 				if scavengelimit < forcegcperiod {
3862 					maxsleep = scavengelimit / 2
3863 				}
3864 				shouldRelax := true
3865 				if osRelaxMinNS > 0 {
3866 					lock(&timers.lock)
3867 					if timers.sleeping {
3868 						now := nanotime()
3869 						next := timers.sleepUntil
3870 						if next-now < osRelaxMinNS {
3871 							shouldRelax = false
3872 						}
3873 					}
3874 					unlock(&timers.lock)
3875 				}
3876 				if shouldRelax {
3877 					osRelax(true)
3878 				}
3879 				notetsleep(&sched.sysmonnote, maxsleep)
3880 				if shouldRelax {
3881 					osRelax(false)
3882 				}
3883 				lock(&sched.lock)
3884 				atomic.Store(&sched.sysmonwait, 0)
3885 				noteclear(&sched.sysmonnote)
3886 				idle = 0
3887 				delay = 20
3888 			}
3889 			unlock(&sched.lock)
3890 		}
3891 		// trigger libc interceptors if needed
3892 		if *cgo_yield != nil {
3893 			asmcgocall(*cgo_yield, nil)
3894 		}
3895 		// poll network if not polled for more than 10ms
3896 		lastpoll := int64(atomic.Load64(&sched.lastpoll))
3897 		now := nanotime()
3898 		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
3899 			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3900 			gp := netpoll(false) // non-blocking - returns list of goroutines
3901 			if gp != nil {
3902 				// Need to decrement the number of idle locked M's
3903 				// (pretending that one more is running) before injectglist.
3904 				// Otherwise it can lead to the following situation:
3905 				// injectglist grabs all P's, but before it starts M's to run the P's,
3906 				// another M returns from a syscall, finishes running its G,
3907 				// observes that there is no work to do and no other running M's,
3908 				// and reports deadlock.
3909 incidlelocked(-1) 3910 injectglist(gp) 3911 incidlelocked(1) 3912 } 3913 } 3914 // retake P's blocked in syscalls 3915 // and preempt long running G's 3916 if retake(now) != 0 { 3917 idle = 0 3918 } else { 3919 idle++ 3920 } 3921 // check if we need to force a GC 3922 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 3923 lock(&forcegc.lock) 3924 forcegc.idle = 0 3925 forcegc.g.schedlink = 0 3926 injectglist(forcegc.g) 3927 unlock(&forcegc.lock) 3928 } 3929 // scavenge heap once in a while 3930 if lastscavenge+scavengelimit/2 < now { 3931 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3932 lastscavenge = now 3933 nscavenge++ 3934 } 3935 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 3936 lasttrace = now 3937 schedtrace(debug.scheddetail > 0) 3938 } 3939 } 3940 } 3941 3942 type sysmontick struct { 3943 schedtick uint32 3944 schedwhen int64 3945 syscalltick uint32 3946 syscallwhen int64 3947 } 3948 3949 // forcePreemptNS is the time slice given to a G before it is 3950 // preempted. 3951 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3952 3953 func retake(now int64) uint32 { 3954 n := 0 3955 for i := int32(0); i < gomaxprocs; i++ { 3956 _p_ := allp[i] 3957 if _p_ == nil { 3958 continue 3959 } 3960 pd := &_p_.sysmontick 3961 s := _p_.status 3962 if s == _Psyscall { 3963 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3964 t := int64(_p_.syscalltick) 3965 if int64(pd.syscalltick) != t { 3966 pd.syscalltick = uint32(t) 3967 pd.syscallwhen = now 3968 continue 3969 } 3970 // On the one hand we don't want to retake Ps if there is no other work to do, 3971 // but on the other hand we want to retake them eventually 3972 // because they can prevent the sysmon thread from deep sleep. 3973 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3974 continue 3975 } 3976 // Need to decrement number of idle locked M's 3977 // (pretending that one more is running) before the CAS. 3978 // Otherwise the M from which we retake can exit the syscall, 3979 // increment nmidle and report deadlock. 3980 incidlelocked(-1) 3981 if atomic.Cas(&_p_.status, s, _Pidle) { 3982 if trace.enabled { 3983 traceGoSysBlock(_p_) 3984 traceProcStop(_p_) 3985 } 3986 n++ 3987 _p_.syscalltick++ 3988 handoffp(_p_) 3989 } 3990 incidlelocked(1) 3991 } else if s == _Prunning { 3992 // Preempt G if it's running for too long. 3993 t := int64(_p_.schedtick) 3994 if int64(pd.schedtick) != t { 3995 pd.schedtick = uint32(t) 3996 pd.schedwhen = now 3997 continue 3998 } 3999 if pd.schedwhen+forcePreemptNS > now { 4000 continue 4001 } 4002 preemptone(_p_) 4003 } 4004 } 4005 return uint32(n) 4006 } 4007 4008 // Tell all goroutines that they have been preempted and they should stop. 4009 // This function is purely best-effort. It can fail to inform a goroutine if a 4010 // processor just started running it. 4011 // No locks need to be held. 4012 // Returns true if preemption request was issued to at least one goroutine. 4013 func preemptall() bool { 4014 res := false 4015 for i := int32(0); i < gomaxprocs; i++ { 4016 _p_ := allp[i] 4017 if _p_ == nil || _p_.status != _Prunning { 4018 continue 4019 } 4020 if preemptone(_p_) { 4021 res = true 4022 } 4023 } 4024 return res 4025 } 4026 4027 // Tell the goroutine running on processor P to stop. 4028 // This function is purely best-effort. It can incorrectly fail to inform the 4029 // goroutine. 
goroutine. It can inform the wrong goroutine. Even if it informs the
4030 // correct goroutine, that goroutine might ignore the request if it is
4031 // simultaneously executing newstack.
4032 // No lock needs to be held.
4033 // Returns true if a preemption request was issued.
4034 // The actual preemption will happen at some point in the future
4035 // and will be indicated by the gp->status no longer being
4036 // Grunning.
4037 func preemptone(_p_ *p) bool {
4038 	mp := _p_.m.ptr()
4039 	if mp == nil || mp == getg().m {
4040 		return false
4041 	}
4042 	gp := mp.curg
4043 	if gp == nil || gp == mp.g0 {
4044 		return false
4045 	}
4046
4047 	gp.preempt = true
4048
4049 	// Every call in a goroutine checks for stack overflow by
4050 	// comparing the current stack pointer to gp->stackguard0.
4051 	// Setting gp->stackguard0 to StackPreempt folds
4052 	// preemption into the normal stack overflow check.
4053 	gp.stackguard0 = stackPreempt
4054 	return true
4055 }
4056
4057 var starttime int64
4058
4059 func schedtrace(detailed bool) {
4060 	now := nanotime()
4061 	if starttime == 0 {
4062 		starttime = now
4063 	}
4064
4065 	lock(&sched.lock)
4066 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4067 	if detailed {
4068 		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4069 	}
4070 	// We must be careful while reading data from P's, M's and G's.
4071 	// Even if we hold schedlock, most data can be changed concurrently.
4072 	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4073 	for i := int32(0); i < gomaxprocs; i++ {
4074 		_p_ := allp[i]
4075 		if _p_ == nil {
4076 			continue
4077 		}
4078 		mp := _p_.m.ptr()
4079 		h := atomic.Load(&_p_.runqhead)
4080 		t := atomic.Load(&_p_.runqtail)
4081 		if detailed {
4082 			id := int32(-1)
4083 			if mp != nil {
4084 				id = mp.id
4085 			}
4086 			print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4087 		} else {
4088 			// In non-detailed mode format lengths of per-P run queues as:
4089 			// [len1 len2 len3 len4]
4090 			print(" ")
4091 			if i == 0 {
4092 				print("[")
4093 			}
4094 			print(t - h)
4095 			if i == gomaxprocs-1 {
4096 				print("]\n")
4097 			}
4098 		}
4099 	}
4100
4101 	if !detailed {
4102 		unlock(&sched.lock)
4103 		return
4104 	}
4105
4106 	for mp := allm; mp != nil; mp = mp.alllink {
4107 		_p_ := mp.p.ptr()
4108 		gp := mp.curg
4109 		lockedg := mp.lockedg
4110 		id1 := int32(-1)
4111 		if _p_ != nil {
4112 			id1 = _p_.id
4113 		}
4114 		id2 := int64(-1)
4115 		if gp != nil {
4116 			id2 = gp.goid
4117 		}
4118 		id3 := int64(-1)
4119 		if lockedg != nil {
4120 			id3 = lockedg.goid
4121 		}
4122 		print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4123 	}
4124
4125 	lock(&allglock)
4126 	for gi := 0; gi < len(allgs); gi++ {
4127 		gp := allgs[gi]
4128 		mp := gp.m
4129 		lockedm := gp.lockedm
4130 		id1 := int32(-1)
4131 		if mp != nil {
4132 			id1 = mp.id
4133 		}
4134 		id2 := int32(-1)
4135 		if lockedm != nil {
4136 			id2 = lockedm.id
4137 		}
4138 		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
4139 	}
4140 	unlock(&allglock)
4141 	unlock(&sched.lock)
4142 }
4143
4144 // Put mp on the midle list.
4145 // Sched must be locked.
4146 // May run during STW, so write barriers are not allowed.
4147 //go:nowritebarrierrec
4148 func mput(mp *m) {
4149 	mp.schedlink = sched.midle
4150 	sched.midle.set(mp)
4151 	sched.nmidle++
4152 	checkdead()
4153 }
4154
4155 // Try to get an m from the midle list.
4156 // Sched must be locked.
4157 // May run during STW, so write barriers are not allowed.
4158 //go:nowritebarrierrec
4159 func mget() *m {
4160 	mp := sched.midle.ptr()
4161 	if mp != nil {
4162 		sched.midle = mp.schedlink
4163 		sched.nmidle--
4164 	}
4165 	return mp
4166 }
4167
4168 // Put gp on the global runnable queue.
4169 // Sched must be locked.
4170 // May run during STW, so write barriers are not allowed.
4171 //go:nowritebarrierrec
4172 func globrunqput(gp *g) {
4173 	gp.schedlink = 0
4174 	if sched.runqtail != 0 {
4175 		sched.runqtail.ptr().schedlink.set(gp)
4176 	} else {
4177 		sched.runqhead.set(gp)
4178 	}
4179 	sched.runqtail.set(gp)
4180 	sched.runqsize++
4181 }
4182
4183 // Put gp at the head of the global runnable queue.
4184 // Sched must be locked.
4185 // May run during STW, so write barriers are not allowed.
4186 //go:nowritebarrierrec
4187 func globrunqputhead(gp *g) {
4188 	gp.schedlink = sched.runqhead
4189 	sched.runqhead.set(gp)
4190 	if sched.runqtail == 0 {
4191 		sched.runqtail.set(gp)
4192 	}
4193 	sched.runqsize++
4194 }
4195
4196 // Put a batch of runnable goroutines on the global runnable queue.
4197 // Sched must be locked.
4198 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4199 	gtail.schedlink = 0
4200 	if sched.runqtail != 0 {
4201 		sched.runqtail.ptr().schedlink.set(ghead)
4202 	} else {
4203 		sched.runqhead.set(ghead)
4204 	}
4205 	sched.runqtail.set(gtail)
4206 	sched.runqsize += n
4207 }
4208
4209 // Try to get a batch of G's from the global runnable queue.
4210 // Sched must be locked.
4211 func globrunqget(_p_ *p, max int32) *g {
4212 	if sched.runqsize == 0 {
4213 		return nil
4214 	}
4215
4216 	n := sched.runqsize/gomaxprocs + 1
4217 	if n > sched.runqsize {
4218 		n = sched.runqsize
4219 	}
4220 	if max > 0 && n > max {
4221 		n = max
4222 	}
4223 	if n > int32(len(_p_.runq))/2 {
4224 		n = int32(len(_p_.runq)) / 2
4225 	}
4226
4227 	sched.runqsize -= n
4228 	if sched.runqsize == 0 {
4229 		sched.runqtail = 0
4230 	}
4231
4232 	gp := sched.runqhead.ptr()
4233 	sched.runqhead = gp.schedlink
4234 	n--
4235 	for ; n > 0; n-- {
4236 		gp1 := sched.runqhead.ptr()
4237 		sched.runqhead = gp1.schedlink
4238 		runqput(_p_, gp1, false)
4239 	}
4240 	return gp
4241 }
4242
4243 // Put p on the _Pidle list.
4244 // Sched must be locked.
4245 // May run during STW, so write barriers are not allowed.
4246 //go:nowritebarrierrec
4247 func pidleput(_p_ *p) {
4248 	if !runqempty(_p_) {
4249 		throw("pidleput: P has non-empty run queue")
4250 	}
4251 	_p_.link = sched.pidle
4252 	sched.pidle.set(_p_)
4253 	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4254 }
4255
4256 // Try to get a p from the _Pidle list.
4257 // Sched must be locked.
4258 // May run during STW, so write barriers are not allowed.
4259 //go:nowritebarrierrec
4260 func pidleget() *p {
4261 	_p_ := sched.pidle.ptr()
4262 	if _p_ != nil {
4263 		sched.pidle = _p_.link
4264 		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4265 	}
4266 	return _p_
4267 }
4268
4269 // runqempty returns true if _p_ has no Gs on its local run queue.
4270 // It never returns true spuriously.
4271 func runqempty(_p_ *p) bool {
4272 	// Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4273 	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4274 	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
4275 	// does not mean the queue is empty.
4276 	for {
4277 		head := atomic.Load(&_p_.runqhead)
4278 		tail := atomic.Load(&_p_.runqtail)
4279 		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4280 		if tail == atomic.Load(&_p_.runqtail) {
4281 			return head == tail && runnext == 0
4282 		}
4283 	}
4284 }
4285
4286 // To shake out latent assumptions about scheduling order,
4287 // we introduce some randomness into scheduling decisions
4288 // when running with the race detector.
4289 // The need for this was made obvious by changing the
4290 // (deterministic) scheduling order in Go 1.5 and breaking
4291 // many poorly-written tests.
4292 // With the randomness here, as long as the tests pass
4293 // consistently with -race, they shouldn't have latent scheduling
4294 // assumptions.
4295 const randomizeScheduler = raceenabled
4296
4297 // runqput tries to put g on the local runnable queue.
4298 // If next is false, runqput adds g to the tail of the runnable queue.
4299 // If next is true, runqput puts g in the _p_.runnext slot.
4300 // If the run queue is full, runqput puts g on the global queue.
4301 // Executed only by the owner P.
4302 func runqput(_p_ *p, gp *g, next bool) {
4303 	if randomizeScheduler && next && fastrand()%2 == 0 {
4304 		next = false
4305 	}
4306
4307 	if next {
4308 	retryNext:
4309 		oldnext := _p_.runnext
4310 		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4311 			goto retryNext
4312 		}
4313 		if oldnext == 0 {
4314 			return
4315 		}
4316 		// Kick the old runnext out to the regular run queue.
4317 		gp = oldnext.ptr()
4318 	}
4319
4320 retry:
4321 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4322 	t := _p_.runqtail
4323 	if t-h < uint32(len(_p_.runq)) {
4324 		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
4325 		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4326 		return
4327 	}
4328 	if runqputslow(_p_, gp, h, t) {
4329 		return
4330 	}
4331 	// the queue is not full, so now the put above must succeed
4332 	goto retry
4333 }
4334
4335 // Put g and a batch of work from the local runnable queue on the global queue.
4336 // Executed only by the owner P.
4337 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4338 	var batch [len(_p_.runq)/2 + 1]*g
4339
4340 	// First, grab a batch from the local queue.
4341 	n := t - h
4342 	n = n / 2
4343 	if n != uint32(len(_p_.runq)/2) {
4344 		throw("runqputslow: queue is not full")
4345 	}
4346 	for i := uint32(0); i < n; i++ {
4347 		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4348 	}
4349 	if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4350 		return false
4351 	}
4352 	batch[n] = gp
4353
4354 	if randomizeScheduler {
4355 		for i := uint32(1); i <= n; i++ {
4356 			j := fastrandn(i + 1)
4357 			batch[i], batch[j] = batch[j], batch[i]
4358 		}
4359 	}
4360
4361 	// Link the goroutines.
4362 	for i := uint32(0); i < n; i++ {
4363 		batch[i].schedlink.set(batch[i+1])
4364 	}
4365
4366 	// Now put the batch on the global queue.
4367 	lock(&sched.lock)
4368 	globrunqputbatch(batch[0], batch[n], int32(n+1))
4369 	unlock(&sched.lock)
4370 	return true
4371 }
4372
4373 // Get g from the local runnable queue.
4374 // If inheritTime is true, gp should inherit the remaining time in the 4375 // current time slice. Otherwise, it should start a new time slice. 4376 // Executed only by the owner P. 4377 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4378 // If there's a runnext, it's the next G to run. 4379 for { 4380 next := _p_.runnext 4381 if next == 0 { 4382 break 4383 } 4384 if _p_.runnext.cas(next, 0) { 4385 return next.ptr(), true 4386 } 4387 } 4388 4389 for { 4390 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4391 t := _p_.runqtail 4392 if t == h { 4393 return nil, false 4394 } 4395 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4396 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4397 return gp, false 4398 } 4399 } 4400 } 4401 4402 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4403 // Batch is a ring buffer starting at batchHead. 4404 // Returns number of grabbed goroutines. 4405 // Can be executed by any P. 4406 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4407 for { 4408 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4409 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4410 n := t - h 4411 n = n - n/2 4412 if n == 0 { 4413 if stealRunNextG { 4414 // Try to steal from _p_.runnext. 4415 if next := _p_.runnext; next != 0 { 4416 // Sleep to ensure that _p_ isn't about to run the g we 4417 // are about to steal. 4418 // The important use case here is when the g running on _p_ 4419 // ready()s another g and then almost immediately blocks. 4420 // Instead of stealing runnext in this window, back off 4421 // to give _p_ a chance to schedule runnext. This will avoid 4422 // thrashing gs between different Ps. 4423 // A sync chan send/recv takes ~50ns as of time of writing, 4424 // so 3us gives ~50x overshoot. 4425 if GOOS != "windows" { 4426 usleep(3) 4427 } else { 4428 // On windows system timer granularity is 1-15ms, 4429 // which is way too much for this optimization. 4430 // So just yield. 4431 osyield() 4432 } 4433 if !_p_.runnext.cas(next, 0) { 4434 continue 4435 } 4436 batch[batchHead%uint32(len(batch))] = next 4437 return 1 4438 } 4439 } 4440 return 0 4441 } 4442 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4443 continue 4444 } 4445 for i := uint32(0); i < n; i++ { 4446 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4447 batch[(batchHead+i)%uint32(len(batch))] = g 4448 } 4449 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4450 return n 4451 } 4452 } 4453 } 4454 4455 // Steal half of elements from local runnable queue of p2 4456 // and put onto local runnable queue of p. 4457 // Returns one of the stolen elements (or nil if failed). 
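//
// Editorial sketch, not part of the original source: findrunnable walks
// potential victims in the randomized order provided by stealOrder (defined
// further below) and calls runqsteal roughly like this:
//
//	for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
//		if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
//			return gp
//		}
//	}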
4458 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4459 	t := _p_.runqtail
4460 	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4461 	if n == 0 {
4462 		return nil
4463 	}
4464 	n--
4465 	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4466 	if n == 0 {
4467 		return gp
4468 	}
4469 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4470 	if t-h+n >= uint32(len(_p_.runq)) {
4471 		throw("runqsteal: runq overflow")
4472 	}
4473 	atomic.Store(&_p_.runqtail, t+n) // store-release, makes the items available for consumption
4474 	return gp
4475 }
4476
4477 //go:linkname setMaxThreads runtime/debug.setMaxThreads
4478 func setMaxThreads(in int) (out int) {
4479 	lock(&sched.lock)
4480 	out = int(sched.maxmcount)
4481 	if in > 0x7fffffff { // MaxInt32
4482 		sched.maxmcount = 0x7fffffff
4483 	} else {
4484 		sched.maxmcount = int32(in)
4485 	}
4486 	checkmcount()
4487 	unlock(&sched.lock)
4488 	return
4489 }
4490
4491 func haveexperiment(name string) bool {
4492 	if name == "framepointer" {
4493 		return framepointer_enabled // set by linker
4494 	}
4495 	x := sys.Goexperiment
4496 	for x != "" {
4497 		xname := ""
4498 		i := index(x, ",")
4499 		if i < 0 {
4500 			xname, x = x, ""
4501 		} else {
4502 			xname, x = x[:i], x[i+1:]
4503 		}
4504 		if xname == name {
4505 			return true
4506 		}
4507 		if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
4508 			return false
4509 		}
4510 	}
4511 	return false
4512 }
4513
4514 //go:nosplit
4515 func procPin() int {
4516 	_g_ := getg()
4517 	mp := _g_.m
4518
4519 	mp.locks++
4520 	return int(mp.p.ptr().id)
4521 }
4522
4523 //go:nosplit
4524 func procUnpin() {
4525 	_g_ := getg()
4526 	_g_.m.locks--
4527 }
4528
4529 //go:linkname sync_runtime_procPin sync.runtime_procPin
4530 //go:nosplit
4531 func sync_runtime_procPin() int {
4532 	return procPin()
4533 }
4534
4535 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4536 //go:nosplit
4537 func sync_runtime_procUnpin() {
4538 	procUnpin()
4539 }
4540
4541 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
4542 //go:nosplit
4543 func sync_atomic_runtime_procPin() int {
4544 	return procPin()
4545 }
4546
4547 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
4548 //go:nosplit
4549 func sync_atomic_runtime_procUnpin() {
4550 	procUnpin()
4551 }
4552
4553 // Active spinning for sync.Mutex.
4554 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4555 //go:nosplit
4556 func sync_runtime_canSpin(i int) bool {
4557 	// sync.Mutex is cooperative, so we are conservative with spinning.
4558 	// Spin only a few times and only if running on a multicore machine and
4559 	// GOMAXPROCS>1 and there is at least one other running P and the local runq is empty.
4560 	// As opposed to runtime mutex we don't do passive spinning here,
4561 	// because there can be work on the global runq or on other Ps.
4562 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4563 		return false
4564 	}
4565 	if p := getg().m.p.ptr(); !runqempty(p) {
4566 		return false
4567 	}
4568 	return true
4569 }
4570
4571 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4572 //go:nosplit
4573 func sync_runtime_doSpin() {
4574 	procyield(active_spin_cnt)
4575 }
4576
4577 var stealOrder randomOrder
4578
4579 // randomOrder/randomEnum are helper types for randomized work stealing.
4580 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4581 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4582 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
4583 type randomOrder struct {
4584 	count    uint32
4585 	coprimes []uint32
4586 }
4587
4588 type randomEnum struct {
4589 	i     uint32
4590 	count uint32
4591 	pos   uint32
4592 	inc   uint32
4593 }
4594
4595 func (ord *randomOrder) reset(count uint32) {
4596 	ord.count = count
4597 	ord.coprimes = ord.coprimes[:0]
4598 	for i := uint32(1); i <= count; i++ {
4599 		if gcd(i, count) == 1 {
4600 			ord.coprimes = append(ord.coprimes, i)
4601 		}
4602 	}
4603 }
4604
4605 func (ord *randomOrder) start(i uint32) randomEnum {
4606 	return randomEnum{
4607 		count: ord.count,
4608 		pos:   i % ord.count,
4609 		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
4610 	}
4611 }
4612
4613 func (enum *randomEnum) done() bool {
4614 	return enum.i == enum.count
4615 }
4616
4617 func (enum *randomEnum) next() {
4618 	enum.i++
4619 	enum.pos = (enum.pos + enum.inc) % enum.count
4620 }
4621
4622 func (enum *randomEnum) position() uint32 {
4623 	return enum.pos
4624 }
4625
4626 func gcd(a, b uint32) uint32 {
4627 	for b != 0 {
4628 		a, b = b, a%b
4629 	}
4630 	return a
4631 }
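
// Editorial example, not part of the original source: how the types above fit
// together. For count = 6 the coprimes are {1, 5}, so depending on the seed an
// enumeration steps by 1 or by 5, and either way visits each of the six
// positions exactly once:
//
//	var ord randomOrder
//	ord.reset(6)
//	for enum := ord.start(fastrand()); !enum.done(); enum.next() {
//		visit(allp[enum.position()]) // visit is a hypothetical callback
//	}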