github.com/karrick/go@v0.0.0-20170817181416-d5b0ec858b37/src/runtime/proc.go 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "runtime/internal/atomic" 9 "runtime/internal/sys" 10 "unsafe" 11 ) 12 13 var buildVersion = sys.TheVersion 14 15 // Goroutine scheduler 16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads. 17 // 18 // The main concepts are: 19 // G - goroutine. 20 // M - worker thread, or machine. 21 // P - processor, a resource that is required to execute Go code. 22 // M must have an associated P to execute Go code; however, it can be 23 // blocked or in a syscall w/o an associated P. 24 // 25 // Design doc at https://golang.org/s/go11sched. 26 27 // Worker thread parking/unparking. 28 // We need to balance between keeping enough running worker threads to utilize 29 // available hardware parallelism and parking excessive running worker threads 30 // to conserve CPU resources and power. This is not simple for two reasons: 31 // (1) scheduler state is intentionally distributed (in particular, per-P work 32 // queues), so it is not possible to compute global predicates on fast paths; 33 // (2) for optimal thread management we would need to know the future (don't park 34 // a worker thread when a new goroutine will be readied in the near future). 35 // 36 // Three rejected approaches that would work badly: 37 // 1. Centralize all scheduler state (would inhibit scalability). 38 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there 39 // is a spare P, unpark a thread and hand it the P and the goroutine. 40 // This would lead to thread state thrashing, as the thread that readied the 41 // goroutine can be out of work the very next moment, and we will need to park it. 42 // Also, it would destroy locality of computation, as we want to keep 43 // dependent goroutines on the same thread; and it would introduce additional latency. 44 // 3. Unpark an additional thread whenever we ready a goroutine and there is an 45 // idle P, but don't do handoff. This would lead to excessive thread parking/ 46 // unparking as the additional threads will instantly park without discovering 47 // any work to do. 48 // 49 // The current approach: 50 // We unpark an additional thread when we ready a goroutine if (1) there is an 51 // idle P and (2) there are no "spinning" worker threads. A worker thread is considered 52 // spinning if it is out of local work and did not find work in the global run queue/ 53 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning. 54 // Threads unparked this way are also considered spinning; we don't do goroutine 55 // handoff so such threads are out of work initially. Spinning threads do some 56 // spinning looking for work in per-P run queues before parking. If a spinning 57 // thread finds work, it takes itself out of the spinning state and proceeds to 58 // execution. If it does not find work, it takes itself out of the spinning state 59 // and then parks. 60 // If there is at least one spinning thread (sched.nmspinning>0), we don't unpark 61 // new threads when readying goroutines. To compensate for that, if the last spinning 62 // thread finds work and stops spinning, it must unpark a new spinning thread.
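//
// In code form, the readying path applies this rule roughly as follows
// (a condensed sketch of what ready() and wakep() later in this file do,
// not the exact statements):
//
//	// after putting gp on a run queue:
//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
//		wakep() // CASes nmspinning 0->1 and starts a spinning M via startm
//	}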
63 // This approach smooths out unjustified spikes of thread unparking, 64 // but at the same time guarantees eventual maximal CPU parallelism utilization. 65 // 66 // The main implementation complication is that we need to be very careful during 67 // spinning->non-spinning thread transition. This transition can race with submission 68 // of a new goroutine, and either one part or another needs to unpark another worker 69 // thread. If they both fail to do that, we can end up with semi-persistent CPU 70 // underutilization. The general pattern for goroutine readying is: submit a goroutine 71 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. 72 // The general pattern for spinning->non-spinning transition is: decrement nmspinning, 73 // #StoreLoad-style memory barrier, check all per-P work queues for new work. 74 // Note that all this complexity does not apply to global run queue as we are not 75 // sloppy about thread unparking when submitting to global queue. Also see comments 76 // for nmspinning manipulation. 77 78 var ( 79 m0 m 80 g0 g 81 raceprocctx0 uintptr 82 ) 83 84 //go:linkname runtime_init runtime.init 85 func runtime_init() 86 87 //go:linkname main_init main.init 88 func main_init() 89 90 // main_init_done is a signal used by cgocallbackg that initialization 91 // has been completed. It is made before _cgo_notify_runtime_init_done, 92 // so all cgo calls can rely on it existing. When main_init is complete, 93 // it is closed, meaning cgocallbackg can reliably receive from it. 94 var main_init_done chan bool 95 96 //go:linkname main_main main.main 97 func main_main() 98 99 // runtimeInitTime is the nanotime() at which the runtime started. 100 var runtimeInitTime int64 101 102 // Value to use for signal mask for newly created M's. 103 var initSigmask sigset 104 105 // The main goroutine. 106 func main() { 107 g := getg() 108 109 // Racectx of m0->g0 is used only as the parent of the main goroutine. 110 // It must not be used for anything else. 111 g.m.g0.racectx = 0 112 113 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 114 // Using decimal instead of binary GB and MB because 115 // they look nicer in the stack overflow failure message. 116 if sys.PtrSize == 8 { 117 maxstacksize = 1000000000 118 } else { 119 maxstacksize = 250000000 120 } 121 122 // Record when the world started. 123 runtimeInitTime = nanotime() 124 125 systemstack(func() { 126 newm(sysmon, nil) 127 }) 128 129 // Lock the main goroutine onto this, the main OS thread, 130 // during initialization. Most programs won't care, but a few 131 // do require certain calls to be made by the main thread. 132 // Those can arrange for main.main to run in the main thread 133 // by calling runtime.LockOSThread during initialization 134 // to preserve the lock. 135 lockOSThread() 136 137 if g.m != &m0 { 138 throw("runtime.main not on m0") 139 } 140 141 runtime_init() // must be before defer 142 143 // Defer unlock so that runtime.Goexit during init does the unlock too. 
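// needUnlock is cleared just before the explicit unlockOSThread call further below,
// so the deferred function and the explicit call cannot both unlock the thread.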
144 needUnlock := true 145 defer func() { 146 if needUnlock { 147 unlockOSThread() 148 } 149 }() 150 151 gcenable() 152 153 main_init_done = make(chan bool) 154 if iscgo { 155 if _cgo_thread_start == nil { 156 throw("_cgo_thread_start missing") 157 } 158 if GOOS != "windows" { 159 if _cgo_setenv == nil { 160 throw("_cgo_setenv missing") 161 } 162 if _cgo_unsetenv == nil { 163 throw("_cgo_unsetenv missing") 164 } 165 } 166 if _cgo_notify_runtime_init_done == nil { 167 throw("_cgo_notify_runtime_init_done missing") 168 } 169 cgocall(_cgo_notify_runtime_init_done, nil) 170 } 171 172 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 173 fn() 174 close(main_init_done) 175 176 needUnlock = false 177 unlockOSThread() 178 179 if isarchive || islibrary { 180 // A program compiled with -buildmode=c-archive or c-shared 181 // has a main, but it is not executed. 182 return 183 } 184 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 185 fn() 186 if raceenabled { 187 racefini() 188 } 189 190 // Make racy client program work: if panicking on 191 // another goroutine at the same time as main returns, 192 // let the other goroutine finish printing the panic trace. 193 // Once it does, it will exit. See issues 3934 and 20018. 194 if atomic.Load(&runningPanicDefers) != 0 { 195 // Running deferred functions should not take long. 196 for c := 0; c < 1000; c++ { 197 if atomic.Load(&runningPanicDefers) == 0 { 198 break 199 } 200 Gosched() 201 } 202 } 203 if atomic.Load(&panicking) != 0 { 204 gopark(nil, nil, "panicwait", traceEvGoStop, 1) 205 } 206 207 exit(0) 208 for { 209 var x *int32 210 *x = 0 211 } 212 } 213 214 // os_beforeExit is called from os.Exit(0). 215 //go:linkname os_beforeExit os.runtime_beforeExit 216 func os_beforeExit() { 217 if raceenabled { 218 racefini() 219 } 220 } 221 222 // start forcegc helper goroutine 223 func init() { 224 go forcegchelper() 225 } 226 227 func forcegchelper() { 228 forcegc.g = getg() 229 for { 230 lock(&forcegc.lock) 231 if forcegc.idle != 0 { 232 throw("forcegc: phase error") 233 } 234 atomic.Store(&forcegc.idle, 1) 235 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1) 236 // this goroutine is explicitly resumed by sysmon 237 if debug.gctrace > 0 { 238 println("GC forced") 239 } 240 // Time-triggered, fully concurrent. 241 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()}) 242 } 243 } 244 245 // Gosched yields the processor, allowing other goroutines to run. It does not 246 // suspend the current goroutine, so execution resumes automatically. 247 //go:nosplit 248 func Gosched() { 249 mcall(gosched_m) 250 } 251 252 // goschedguarded yields the processor like gosched, but also checks 253 // for forbidden states and opts out of the yield in those cases. 254 //go:nosplit 255 func goschedguarded() { 256 mcall(goschedguarded_m) 257 } 258 259 // Puts the current goroutine into a waiting state and calls unlockf. 260 // If unlockf returns false, the goroutine is resumed. 261 // unlockf must not access this G's stack, as it may be moved between 262 // the call to gopark and the call to unlockf. 
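// A typical pairing inside the runtime looks roughly like this (sketch only;
// somelock is a placeholder mutex, gp the parked goroutine):
//
//	// waiter: release somelock and block until readied
//	goparkunlock(&somelock, "example wait", traceEvGoBlock, 1)
//
//	// notifier, on another goroutine, once the condition holds:
//	goready(gp, 1)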
263 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) { 264 mp := acquirem() 265 gp := mp.curg 266 status := readgstatus(gp) 267 if status != _Grunning && status != _Gscanrunning { 268 throw("gopark: bad g status") 269 } 270 mp.waitlock = lock 271 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf)) 272 gp.waitreason = reason 273 mp.waittraceev = traceEv 274 mp.waittraceskip = traceskip 275 releasem(mp) 276 // can't do anything that might move the G between Ms here. 277 mcall(park_m) 278 } 279 280 // Puts the current goroutine into a waiting state and unlocks the lock. 281 // The goroutine can be made runnable again by calling goready(gp). 282 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) { 283 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 284 } 285 286 func goready(gp *g, traceskip int) { 287 systemstack(func() { 288 ready(gp, traceskip, true) 289 }) 290 } 291 292 //go:nosplit 293 func acquireSudog() *sudog { 294 // Delicate dance: the semaphore implementation calls 295 // acquireSudog, acquireSudog calls new(sudog), 296 // new calls malloc, malloc can call the garbage collector, 297 // and the garbage collector calls the semaphore implementation 298 // in stopTheWorld. 299 // Break the cycle by doing acquirem/releasem around new(sudog). 300 // The acquirem/releasem increments m.locks during new(sudog), 301 // which keeps the garbage collector from being invoked. 302 mp := acquirem() 303 pp := mp.p.ptr() 304 if len(pp.sudogcache) == 0 { 305 lock(&sched.sudoglock) 306 // First, try to grab a batch from central cache. 307 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 308 s := sched.sudogcache 309 sched.sudogcache = s.next 310 s.next = nil 311 pp.sudogcache = append(pp.sudogcache, s) 312 } 313 unlock(&sched.sudoglock) 314 // If the central cache is empty, allocate a new one. 315 if len(pp.sudogcache) == 0 { 316 pp.sudogcache = append(pp.sudogcache, new(sudog)) 317 } 318 } 319 n := len(pp.sudogcache) 320 s := pp.sudogcache[n-1] 321 pp.sudogcache[n-1] = nil 322 pp.sudogcache = pp.sudogcache[:n-1] 323 if s.elem != nil { 324 throw("acquireSudog: found s.elem != nil in cache") 325 } 326 releasem(mp) 327 return s 328 } 329 330 //go:nosplit 331 func releaseSudog(s *sudog) { 332 if s.elem != nil { 333 throw("runtime: sudog with non-nil elem") 334 } 335 if s.isSelect { 336 throw("runtime: sudog with non-false isSelect") 337 } 338 if s.next != nil { 339 throw("runtime: sudog with non-nil next") 340 } 341 if s.prev != nil { 342 throw("runtime: sudog with non-nil prev") 343 } 344 if s.waitlink != nil { 345 throw("runtime: sudog with non-nil waitlink") 346 } 347 if s.c != nil { 348 throw("runtime: sudog with non-nil c") 349 } 350 gp := getg() 351 if gp.param != nil { 352 throw("runtime: releaseSudog with non-nil gp.param") 353 } 354 mp := acquirem() // avoid rescheduling to another P 355 pp := mp.p.ptr() 356 if len(pp.sudogcache) == cap(pp.sudogcache) { 357 // Transfer half of local cache to the central cache. 
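// The surplus sudogs are first strung together on a private list (first/last
// below) and then spliced onto sched.sudogcache in one step, so sched.sudoglock
// is held only for the splice rather than once per sudog.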
358 var first, last *sudog 359 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 360 n := len(pp.sudogcache) 361 p := pp.sudogcache[n-1] 362 pp.sudogcache[n-1] = nil 363 pp.sudogcache = pp.sudogcache[:n-1] 364 if first == nil { 365 first = p 366 } else { 367 last.next = p 368 } 369 last = p 370 } 371 lock(&sched.sudoglock) 372 last.next = sched.sudogcache 373 sched.sudogcache = first 374 unlock(&sched.sudoglock) 375 } 376 pp.sudogcache = append(pp.sudogcache, s) 377 releasem(mp) 378 } 379 380 // funcPC returns the entry PC of the function f. 381 // It assumes that f is a func value. Otherwise the behavior is undefined. 382 //go:nosplit 383 func funcPC(f interface{}) uintptr { 384 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) 385 } 386 387 // called from assembly 388 func badmcall(fn func(*g)) { 389 throw("runtime: mcall called on m->g0 stack") 390 } 391 392 func badmcall2(fn func(*g)) { 393 throw("runtime: mcall function returned") 394 } 395 396 func badreflectcall() { 397 panic(plainError("arg size to reflect.call more than 1GB")) 398 } 399 400 var badmorestackg0Msg = "fatal: morestack on g0\n" 401 402 //go:nosplit 403 //go:nowritebarrierrec 404 func badmorestackg0() { 405 sp := stringStructOf(&badmorestackg0Msg) 406 write(2, sp.str, int32(sp.len)) 407 } 408 409 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n" 410 411 //go:nosplit 412 //go:nowritebarrierrec 413 func badmorestackgsignal() { 414 sp := stringStructOf(&badmorestackgsignalMsg) 415 write(2, sp.str, int32(sp.len)) 416 } 417 418 //go:nosplit 419 func badctxt() { 420 throw("ctxt != 0") 421 } 422 423 func lockedOSThread() bool { 424 gp := getg() 425 return gp.lockedm != nil && gp.m.lockedg != nil 426 } 427 428 var ( 429 allgs []*g 430 allglock mutex 431 ) 432 433 func allgadd(gp *g) { 434 if readgstatus(gp) == _Gidle { 435 throw("allgadd: bad status Gidle") 436 } 437 438 lock(&allglock) 439 allgs = append(allgs, gp) 440 allglen = uintptr(len(allgs)) 441 unlock(&allglock) 442 } 443 444 const ( 445 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 446 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 447 _GoidCacheBatch = 16 448 ) 449 450 // The bootstrap sequence is: 451 // 452 // call osinit 453 // call schedinit 454 // make & queue new G 455 // call runtime·mstart 456 // 457 // The new G calls runtime·main. 458 func schedinit() { 459 // raceinit must be the first call to race detector. 460 // In particular, it must be done before mallocinit below calls racemapshadow. 461 _g_ := getg() 462 if raceenabled { 463 _g_.racectx, raceprocctx0 = raceinit() 464 } 465 466 sched.maxmcount = 10000 467 468 tracebackinit() 469 moduledataverify() 470 stackinit() 471 mallocinit() 472 mcommoninit(_g_.m) 473 alginit() // maps must not be used before this call 474 modulesinit() // provides activeModules 475 typelinksinit() // uses maps, activeModules 476 itabsinit() // uses activeModules 477 478 msigsave(_g_.m) 479 initSigmask = _g_.m.sigmask 480 481 goargs() 482 goenvs() 483 parsedebugvars() 484 gcinit() 485 486 sched.lastpoll = uint64(nanotime()) 487 procs := ncpu 488 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { 489 procs = n 490 } 491 if procs > _MaxGomaxprocs { 492 procs = _MaxGomaxprocs 493 } 494 if procresize(procs) != nil { 495 throw("unknown runnable goroutine during bootstrap") 496 } 497 498 if buildVersion == "" { 499 // Condition should never trigger. 
This code just serves 500 // to ensure runtime·buildVersion is kept in the resulting binary. 501 buildVersion = "unknown" 502 } 503 } 504 505 func dumpgstatus(gp *g) { 506 _g_ := getg() 507 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 508 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 509 } 510 511 func checkmcount() { 512 // sched lock is held 513 if sched.mcount > sched.maxmcount { 514 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 515 throw("thread exhaustion") 516 } 517 } 518 519 func mcommoninit(mp *m) { 520 _g_ := getg() 521 522 // g0 stack won't make sense for user (and is not necessarily unwindable). 523 if _g_ != _g_.m.g0 { 524 callers(1, mp.createstack[:]) 525 } 526 527 mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks()) 528 if mp.fastrand == 0 { 529 mp.fastrand = 0x49f6428a 530 } 531 532 lock(&sched.lock) 533 mp.id = sched.mcount 534 sched.mcount++ 535 checkmcount() 536 mpreinit(mp) 537 if mp.gsignal != nil { 538 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 539 } 540 541 // Add to allm so garbage collector doesn't free g->m 542 // when it is just in a register or thread-local storage. 543 mp.alllink = allm 544 545 // NumCgoCall() iterates over allm w/o schedlock, 546 // so we need to publish it safely. 547 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 548 unlock(&sched.lock) 549 550 // Allocate memory to hold a cgo traceback if the cgo call crashes. 551 if iscgo || GOOS == "solaris" || GOOS == "windows" { 552 mp.cgoCallers = new(cgoCallers) 553 } 554 } 555 556 // Mark gp ready to run. 557 func ready(gp *g, traceskip int, next bool) { 558 if trace.enabled { 559 traceGoUnpark(gp, traceskip) 560 } 561 562 status := readgstatus(gp) 563 564 // Mark runnable. 565 _g_ := getg() 566 _g_.m.locks++ // disable preemption because it can be holding p in a local var 567 if status&^_Gscan != _Gwaiting { 568 dumpgstatus(gp) 569 throw("bad g->status in ready") 570 } 571 572 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 573 casgstatus(gp, _Gwaiting, _Grunnable) 574 runqput(_g_.m.p.ptr(), gp, next) 575 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 576 wakep() 577 } 578 _g_.m.locks-- 579 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 580 _g_.stackguard0 = stackPreempt 581 } 582 } 583 584 func gcprocs() int32 { 585 // Figure out how many CPUs to use during GC. 586 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
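// Equivalently: n = min(gomaxprocs, ncpu, _MaxGcproc, sched.nmidle+1),
// where the +1 accounts for the M that is already running.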
587 lock(&sched.lock) 588 n := gomaxprocs 589 if n > ncpu { 590 n = ncpu 591 } 592 if n > _MaxGcproc { 593 n = _MaxGcproc 594 } 595 if n > sched.nmidle+1 { // one M is currently running 596 n = sched.nmidle + 1 597 } 598 unlock(&sched.lock) 599 return n 600 } 601 602 func needaddgcproc() bool { 603 lock(&sched.lock) 604 n := gomaxprocs 605 if n > ncpu { 606 n = ncpu 607 } 608 if n > _MaxGcproc { 609 n = _MaxGcproc 610 } 611 n -= sched.nmidle + 1 // one M is currently running 612 unlock(&sched.lock) 613 return n > 0 614 } 615 616 func helpgc(nproc int32) { 617 _g_ := getg() 618 lock(&sched.lock) 619 pos := 0 620 for n := int32(1); n < nproc; n++ { // one M is currently running 621 if allp[pos].mcache == _g_.m.mcache { 622 pos++ 623 } 624 mp := mget() 625 if mp == nil { 626 throw("gcprocs inconsistency") 627 } 628 mp.helpgc = n 629 mp.p.set(allp[pos]) 630 mp.mcache = allp[pos].mcache 631 pos++ 632 notewakeup(&mp.park) 633 } 634 unlock(&sched.lock) 635 } 636 637 // freezeStopWait is a large value that freezetheworld sets 638 // sched.stopwait to in order to request that all Gs permanently stop. 639 const freezeStopWait = 0x7fffffff 640 641 // freezing is set to non-zero if the runtime is trying to freeze the 642 // world. 643 var freezing uint32 644 645 // Similar to stopTheWorld but best-effort and can be called several times. 646 // There is no reverse operation, used during crashing. 647 // This function must not lock any mutexes. 648 func freezetheworld() { 649 atomic.Store(&freezing, 1) 650 // stopwait and preemption requests can be lost 651 // due to races with concurrently executing threads, 652 // so try several times 653 for i := 0; i < 5; i++ { 654 // this should tell the scheduler to not start any new goroutines 655 sched.stopwait = freezeStopWait 656 atomic.Store(&sched.gcwaiting, 1) 657 // this should stop running goroutines 658 if !preemptall() { 659 break // no running goroutines 660 } 661 usleep(1000) 662 } 663 // to be sure 664 usleep(1000) 665 preemptall() 666 usleep(1000) 667 } 668 669 func isscanstatus(status uint32) bool { 670 if status == _Gscan { 671 throw("isscanstatus: Bad status Gscan") 672 } 673 return status&_Gscan == _Gscan 674 } 675 676 // All reads and writes of g's status go through readgstatus, casgstatus 677 // castogscanstatus, casfrom_Gscanstatus. 678 //go:nosplit 679 func readgstatus(gp *g) uint32 { 680 return atomic.Load(&gp.atomicstatus) 681 } 682 683 // Ownership of gcscanvalid: 684 // 685 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan), 686 // then gp owns gp.gcscanvalid, and other goroutines must not modify it. 687 // 688 // Otherwise, a second goroutine can lock the scan state by setting _Gscan 689 // in the status bit and then modify gcscanvalid, and then unlock the scan state. 690 // 691 // Note that the first condition implies an exception to the second: 692 // if a second goroutine changes gp's status to _Grunning|_Gscan, 693 // that second goroutine still does not have the right to modify gcscanvalid. 694 695 // The Gscanstatuses are acting like locks and this releases them. 696 // If it proves to be a performance hit we should be able to make these 697 // simple atomic stores but for now we are going to throw if 698 // we see an inconsistent state. 699 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 700 success := false 701 702 // Check that transition is valid. 
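// The only valid transitions clear the _Gscan bit, e.g.
// _Gscanwaiting -> _Gwaiting or _Gscanrunnable -> _Grunnable;
// newval must equal oldval with the scan bit removed.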
703 switch oldval { 704 default: 705 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 706 dumpgstatus(gp) 707 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 708 case _Gscanrunnable, 709 _Gscanwaiting, 710 _Gscanrunning, 711 _Gscansyscall: 712 if newval == oldval&^_Gscan { 713 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 714 } 715 } 716 if !success { 717 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 718 dumpgstatus(gp) 719 throw("casfrom_Gscanstatus: gp->status is not in scan state") 720 } 721 } 722 723 // This will return false if the gp is not in the expected status and the cas fails. 724 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 725 func castogscanstatus(gp *g, oldval, newval uint32) bool { 726 switch oldval { 727 case _Grunnable, 728 _Grunning, 729 _Gwaiting, 730 _Gsyscall: 731 if newval == oldval|_Gscan { 732 return atomic.Cas(&gp.atomicstatus, oldval, newval) 733 } 734 } 735 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 736 throw("castogscanstatus") 737 panic("not reached") 738 } 739 740 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 741 // and casfrom_Gscanstatus instead. 742 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 743 // put it in the Gscan state is finished. 744 //go:nosplit 745 func casgstatus(gp *g, oldval, newval uint32) { 746 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 747 systemstack(func() { 748 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 749 throw("casgstatus: bad incoming values") 750 }) 751 } 752 753 if oldval == _Grunning && gp.gcscanvalid { 754 // If oldvall == _Grunning, then the actual status must be 755 // _Grunning or _Grunning|_Gscan; either way, 756 // we own gp.gcscanvalid, so it's safe to read. 757 // gp.gcscanvalid must not be true when we are running. 758 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n") 759 throw("casgstatus") 760 } 761 762 // See http://golang.org/cl/21503 for justification of the yield delay. 763 const yieldDelay = 5 * 1000 764 var nextYield int64 765 766 // loop if gp->atomicstatus is in a scan state giving 767 // GC time to finish and change the state to oldval. 768 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { 769 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 770 systemstack(func() { 771 throw("casgstatus: waiting for Gwaiting but is Grunnable") 772 }) 773 } 774 // Help GC if needed. 775 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { 776 // gp.preemptscan = false 777 // systemstack(func() { 778 // gcphasework(gp) 779 // }) 780 // } 781 // But meanwhile just yield. 782 if i == 0 { 783 nextYield = nanotime() + yieldDelay 784 } 785 if nanotime() < nextYield { 786 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { 787 procyield(1) 788 } 789 } else { 790 osyield() 791 nextYield = nanotime() + yieldDelay/2 792 } 793 } 794 if newval == _Grunning { 795 gp.gcscanvalid = false 796 } 797 } 798 799 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 800 // Returns old status. 
Cannot call casgstatus directly, because we are racing with an 801 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 802 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 803 // it would loop waiting for the status to go back to Gwaiting, which it never will. 804 //go:nosplit 805 func casgcopystack(gp *g) uint32 { 806 for { 807 oldstatus := readgstatus(gp) &^ _Gscan 808 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 809 throw("copystack: bad status, not Gwaiting or Grunnable") 810 } 811 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 812 return oldstatus 813 } 814 } 815 } 816 817 // scang blocks until gp's stack has been scanned. 818 // It might be scanned by scang or it might be scanned by the goroutine itself. 819 // Either way, the stack scan has completed when scang returns. 820 func scang(gp *g, gcw *gcWork) { 821 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone. 822 // Nothing is racing with us now, but gcscandone might be set to true left over 823 // from an earlier round of stack scanning (we scan twice per GC). 824 // We use gcscandone to record whether the scan has been done during this round. 825 826 gp.gcscandone = false 827 828 // See http://golang.org/cl/21503 for justification of the yield delay. 829 const yieldDelay = 10 * 1000 830 var nextYield int64 831 832 // Endeavor to get gcscandone set to true, 833 // either by doing the stack scan ourselves or by coercing gp to scan itself. 834 // gp.gcscandone can transition from false to true when we're not looking 835 // (if we asked for preemption), so any time we lock the status using 836 // castogscanstatus we have to double-check that the scan is still not done. 837 loop: 838 for i := 0; !gp.gcscandone; i++ { 839 switch s := readgstatus(gp); s { 840 default: 841 dumpgstatus(gp) 842 throw("stopg: invalid status") 843 844 case _Gdead: 845 // No stack. 846 gp.gcscandone = true 847 break loop 848 849 case _Gcopystack: 850 // Stack being switched. Go around again. 851 852 case _Grunnable, _Gsyscall, _Gwaiting: 853 // Claim goroutine by setting scan bit. 854 // Racing with execution or readying of gp. 855 // The scan bit keeps them from running 856 // the goroutine until we're done. 857 if castogscanstatus(gp, s, s|_Gscan) { 858 if !gp.gcscandone { 859 scanstack(gp, gcw) 860 gp.gcscandone = true 861 } 862 restartg(gp) 863 break loop 864 } 865 866 case _Gscanwaiting: 867 // newstack is doing a scan for us right now. Wait. 868 869 case _Grunning: 870 // Goroutine running. Try to preempt execution so it can scan itself. 871 // The preemption handler (in newstack) does the actual scan. 872 873 // Optimization: if there is already a pending preemption request 874 // (from the previous loop iteration), don't bother with the atomics. 875 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt { 876 break 877 } 878 879 // Ask for preemption and self scan. 
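// Briefly taking the _Gscanrunning bit keeps gp from changing status while
// the preemption request is set; the scan itself is then performed by gp in
// the preemption handler (newstack), which also sets gcscandone.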
880 if castogscanstatus(gp, _Grunning, _Gscanrunning) { 881 if !gp.gcscandone { 882 gp.preemptscan = true 883 gp.preempt = true 884 gp.stackguard0 = stackPreempt 885 } 886 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning) 887 } 888 } 889 890 if i == 0 { 891 nextYield = nanotime() + yieldDelay 892 } 893 if nanotime() < nextYield { 894 procyield(10) 895 } else { 896 osyield() 897 nextYield = nanotime() + yieldDelay/2 898 } 899 } 900 901 gp.preemptscan = false // cancel scan request if no longer needed 902 } 903 904 // The GC requests that this routine be moved from a scanmumble state to a mumble state. 905 func restartg(gp *g) { 906 s := readgstatus(gp) 907 switch s { 908 default: 909 dumpgstatus(gp) 910 throw("restartg: unexpected status") 911 912 case _Gdead: 913 // ok 914 915 case _Gscanrunnable, 916 _Gscanwaiting, 917 _Gscansyscall: 918 casfrom_Gscanstatus(gp, s, s&^_Gscan) 919 } 920 } 921 922 // stopTheWorld stops all P's from executing goroutines, interrupting 923 // all goroutines at GC safe points and records reason as the reason 924 // for the stop. On return, only the current goroutine's P is running. 925 // stopTheWorld must not be called from a system stack and the caller 926 // must not hold worldsema. The caller must call startTheWorld when 927 // other P's should resume execution. 928 // 929 // stopTheWorld is safe for multiple goroutines to call at the 930 // same time. Each will execute its own stop, and the stops will 931 // be serialized. 932 // 933 // This is also used by routines that do stack dumps. If the system is 934 // in panic or being exited, this may not reliably stop all 935 // goroutines. 936 func stopTheWorld(reason string) { 937 semacquire(&worldsema) 938 getg().m.preemptoff = reason 939 systemstack(stopTheWorldWithSema) 940 } 941 942 // startTheWorld undoes the effects of stopTheWorld. 943 func startTheWorld() { 944 systemstack(func() { startTheWorldWithSema() }) 945 // worldsema must be held over startTheWorldWithSema to ensure 946 // gomaxprocs cannot change while worldsema is held. 947 semrelease(&worldsema) 948 getg().m.preemptoff = "" 949 } 950 951 // Holding worldsema grants an M the right to try to stop the world 952 // and prevents gomaxprocs from changing concurrently. 953 var worldsema uint32 = 1 954 955 // stopTheWorldWithSema is the core implementation of stopTheWorld. 956 // The caller is responsible for acquiring worldsema and disabling 957 // preemption first and then should stopTheWorldWithSema on the system 958 // stack: 959 // 960 // semacquire(&worldsema, 0) 961 // m.preemptoff = "reason" 962 // systemstack(stopTheWorldWithSema) 963 // 964 // When finished, the caller must either call startTheWorld or undo 965 // these three operations separately: 966 // 967 // m.preemptoff = "" 968 // systemstack(startTheWorldWithSema) 969 // semrelease(&worldsema) 970 // 971 // It is allowed to acquire worldsema once and then execute multiple 972 // startTheWorldWithSema/stopTheWorldWithSema pairs. 973 // Other P's are able to execute between successive calls to 974 // startTheWorldWithSema and stopTheWorldWithSema. 975 // Holding worldsema causes any other goroutines invoking 976 // stopTheWorld to block. 977 func stopTheWorldWithSema() { 978 _g_ := getg() 979 980 // If we hold a lock, then we won't be able to stop another M 981 // that is blocked trying to acquire the lock. 
982 if _g_.m.locks > 0 { 983 throw("stopTheWorld: holding locks") 984 } 985 986 lock(&sched.lock) 987 sched.stopwait = gomaxprocs 988 atomic.Store(&sched.gcwaiting, 1) 989 preemptall() 990 // stop current P 991 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 992 sched.stopwait-- 993 // try to retake all P's in Psyscall status 994 for i := 0; i < int(gomaxprocs); i++ { 995 p := allp[i] 996 s := p.status 997 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 998 if trace.enabled { 999 traceGoSysBlock(p) 1000 traceProcStop(p) 1001 } 1002 p.syscalltick++ 1003 sched.stopwait-- 1004 } 1005 } 1006 // stop idle P's 1007 for { 1008 p := pidleget() 1009 if p == nil { 1010 break 1011 } 1012 p.status = _Pgcstop 1013 sched.stopwait-- 1014 } 1015 wait := sched.stopwait > 0 1016 unlock(&sched.lock) 1017 1018 // wait for remaining P's to stop voluntarily 1019 if wait { 1020 for { 1021 // wait for 100us, then try to re-preempt in case of any races 1022 if notetsleep(&sched.stopnote, 100*1000) { 1023 noteclear(&sched.stopnote) 1024 break 1025 } 1026 preemptall() 1027 } 1028 } 1029 1030 // sanity checks 1031 bad := "" 1032 if sched.stopwait != 0 { 1033 bad = "stopTheWorld: not stopped (stopwait != 0)" 1034 } else { 1035 for i := 0; i < int(gomaxprocs); i++ { 1036 p := allp[i] 1037 if p.status != _Pgcstop { 1038 bad = "stopTheWorld: not stopped (status != _Pgcstop)" 1039 } 1040 } 1041 } 1042 if atomic.Load(&freezing) != 0 { 1043 // Some other thread is panicking. This can cause the 1044 // sanity checks above to fail if the panic happens in 1045 // the signal handler on a stopped thread. Either way, 1046 // we should halt this thread. 1047 lock(&deadlock) 1048 lock(&deadlock) 1049 } 1050 if bad != "" { 1051 throw(bad) 1052 } 1053 } 1054 1055 func mhelpgc() { 1056 _g_ := getg() 1057 _g_.m.helpgc = -1 1058 } 1059 1060 func startTheWorldWithSema() int64 { 1061 _g_ := getg() 1062 1063 _g_.m.locks++ // disable preemption because it can be holding p in a local var 1064 gp := netpoll(false) // non-blocking 1065 injectglist(gp) 1066 add := needaddgcproc() 1067 lock(&sched.lock) 1068 1069 procs := gomaxprocs 1070 if newprocs != 0 { 1071 procs = newprocs 1072 newprocs = 0 1073 } 1074 p1 := procresize(procs) 1075 sched.gcwaiting = 0 1076 if sched.sysmonwait != 0 { 1077 sched.sysmonwait = 0 1078 notewakeup(&sched.sysmonnote) 1079 } 1080 unlock(&sched.lock) 1081 1082 for p1 != nil { 1083 p := p1 1084 p1 = p1.link.ptr() 1085 if p.m != 0 { 1086 mp := p.m.ptr() 1087 p.m = 0 1088 if mp.nextp != 0 { 1089 throw("startTheWorld: inconsistent mp->nextp") 1090 } 1091 mp.nextp.set(p) 1092 notewakeup(&mp.park) 1093 } else { 1094 // Start M to run P. Do not start another M below. 1095 newm(nil, p) 1096 add = false 1097 } 1098 } 1099 1100 // Capture start-the-world time before doing clean-up tasks. 1101 startTime := nanotime() 1102 1103 // Wakeup an additional proc in case we have excessive runnable goroutines 1104 // in local queues or in the global queue. If we don't, the proc will park itself. 1105 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1106 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 1107 wakep() 1108 } 1109 1110 if add { 1111 // If GC could have used another helper proc, start one now, 1112 // in the hope that it will be available next time. 1113 // It would have been even better to start it before the collection, 1114 // but doing so requires allocating memory, so it's tricky to 1115 // coordinate. 
This lazy approach works out in practice: 1116 // we don't mind if the first couple gc rounds don't have quite 1117 // the maximum number of procs. 1118 newm(mhelpgc, nil) 1119 } 1120 _g_.m.locks-- 1121 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1122 _g_.stackguard0 = stackPreempt 1123 } 1124 1125 return startTime 1126 } 1127 1128 // Called to start an M. 1129 //go:nosplit 1130 func mstart() { 1131 _g_ := getg() 1132 1133 if _g_.stack.lo == 0 { 1134 // Initialize stack bounds from system stack. 1135 // Cgo may have left stack size in stack.hi. 1136 size := _g_.stack.hi 1137 if size == 0 { 1138 size = 8192 * sys.StackGuardMultiplier 1139 } 1140 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1141 _g_.stack.lo = _g_.stack.hi - size + 1024 1142 } 1143 // Initialize stack guards so that we can start calling 1144 // both Go and C functions with stack growth prologues. 1145 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1146 _g_.stackguard1 = _g_.stackguard0 1147 mstart1() 1148 } 1149 1150 func mstart1() { 1151 _g_ := getg() 1152 1153 if _g_ != _g_.m.g0 { 1154 throw("bad runtime·mstart") 1155 } 1156 1157 // Record top of stack for use by mcall. 1158 // Once we call schedule we're never coming back, 1159 // so other calls can reuse this stack space. 1160 gosave(&_g_.m.g0.sched) 1161 _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used 1162 asminit() 1163 minit() 1164 1165 // Install signal handlers; after minit so that minit can 1166 // prepare the thread to be able to handle the signals. 1167 if _g_.m == &m0 { 1168 // Create an extra M for callbacks on threads not created by Go. 1169 if iscgo && !cgoHasExtraM { 1170 cgoHasExtraM = true 1171 newextram() 1172 } 1173 initsig(false) 1174 } 1175 1176 if fn := _g_.m.mstartfn; fn != nil { 1177 fn() 1178 } 1179 1180 if _g_.m.helpgc != 0 { 1181 _g_.m.helpgc = 0 1182 stopm() 1183 } else if _g_.m != &m0 { 1184 acquirep(_g_.m.nextp.ptr()) 1185 _g_.m.nextp = 0 1186 } 1187 schedule() 1188 } 1189 1190 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1191 // If a P is currently executing code, this will bring the P to a GC 1192 // safe point and execute fn on that P. If the P is not executing code 1193 // (it is idle or in a syscall), this will call fn(p) directly while 1194 // preventing the P from exiting its state. This does not ensure that 1195 // fn will run on every CPU executing Go code, but it acts as a global 1196 // memory barrier. GC uses this as a "ragged barrier." 1197 // 1198 // The caller must hold worldsema. 1199 // 1200 //go:systemstack 1201 func forEachP(fn func(*p)) { 1202 mp := acquirem() 1203 _p_ := getg().m.p.ptr() 1204 1205 lock(&sched.lock) 1206 if sched.safePointWait != 0 { 1207 throw("forEachP: sched.safePointWait != 0") 1208 } 1209 sched.safePointWait = gomaxprocs - 1 1210 sched.safePointFn = fn 1211 1212 // Ask all Ps to run the safe point function. 1213 for _, p := range allp[:gomaxprocs] { 1214 if p != _p_ { 1215 atomic.Store(&p.runSafePointFn, 1) 1216 } 1217 } 1218 preemptall() 1219 1220 // Any P entering _Pidle or _Psyscall from now on will observe 1221 // p.runSafePointFn == 1 and will call runSafePointFn when 1222 // changing its status to _Pidle/_Psyscall. 1223 1224 // Run safe point function for all idle Ps. sched.pidle will 1225 // not change because we hold sched.lock. 
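// The CAS from 1 to 0 coordinates with runSafePointFn below, so fn runs at
// most once per P even if the P reaches a safe point concurrently.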
1226 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1227 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1228 fn(p) 1229 sched.safePointWait-- 1230 } 1231 } 1232 1233 wait := sched.safePointWait > 0 1234 unlock(&sched.lock) 1235 1236 // Run fn for the current P. 1237 fn(_p_) 1238 1239 // Force Ps currently in _Psyscall into _Pidle and hand them 1240 // off to induce safe point function execution. 1241 for i := 0; i < int(gomaxprocs); i++ { 1242 p := allp[i] 1243 s := p.status 1244 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1245 if trace.enabled { 1246 traceGoSysBlock(p) 1247 traceProcStop(p) 1248 } 1249 p.syscalltick++ 1250 handoffp(p) 1251 } 1252 } 1253 1254 // Wait for remaining Ps to run fn. 1255 if wait { 1256 for { 1257 // Wait for 100us, then try to re-preempt in 1258 // case of any races. 1259 // 1260 // Requires system stack. 1261 if notetsleep(&sched.safePointNote, 100*1000) { 1262 noteclear(&sched.safePointNote) 1263 break 1264 } 1265 preemptall() 1266 } 1267 } 1268 if sched.safePointWait != 0 { 1269 throw("forEachP: not done") 1270 } 1271 for i := 0; i < int(gomaxprocs); i++ { 1272 p := allp[i] 1273 if p.runSafePointFn != 0 { 1274 throw("forEachP: P did not run fn") 1275 } 1276 } 1277 1278 lock(&sched.lock) 1279 sched.safePointFn = nil 1280 unlock(&sched.lock) 1281 releasem(mp) 1282 } 1283 1284 // runSafePointFn runs the safe point function, if any, for this P. 1285 // This should be called like 1286 // 1287 // if getg().m.p.runSafePointFn != 0 { 1288 // runSafePointFn() 1289 // } 1290 // 1291 // runSafePointFn must be checked on any transition in to _Pidle or 1292 // _Psyscall to avoid a race where forEachP sees that the P is running 1293 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1294 // nor the P run the safe-point function. 1295 func runSafePointFn() { 1296 p := getg().m.p.ptr() 1297 // Resolve the race between forEachP running the safe-point 1298 // function on this P's behalf and this P running the 1299 // safe-point function directly. 1300 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1301 return 1302 } 1303 sched.safePointFn(p) 1304 lock(&sched.lock) 1305 sched.safePointWait-- 1306 if sched.safePointWait == 0 { 1307 notewakeup(&sched.safePointNote) 1308 } 1309 unlock(&sched.lock) 1310 } 1311 1312 // When running with cgo, we call _cgo_thread_start 1313 // to start threads for us so that we can play nicely with 1314 // foreign code. 1315 var cgoThreadStart unsafe.Pointer 1316 1317 type cgothreadstart struct { 1318 g guintptr 1319 tls *uint64 1320 fn unsafe.Pointer 1321 } 1322 1323 // Allocate a new m unassociated with any thread. 1324 // Can use p for allocation context if needed. 1325 // fn is recorded as the new m's m.mstartfn. 1326 // 1327 // This function is allowed to have write barriers even if the caller 1328 // isn't because it borrows _p_. 1329 // 1330 //go:yeswritebarrierrec 1331 func allocm(_p_ *p, fn func()) *m { 1332 _g_ := getg() 1333 _g_.m.locks++ // disable GC because it can be called from sysmon 1334 if _g_.m.p == 0 { 1335 acquirep(_p_) // temporarily borrow p for mallocs in this function 1336 } 1337 mp := new(m) 1338 mp.mstartfn = fn 1339 mcommoninit(mp) 1340 1341 // In case of cgo or Solaris, pthread_create will make us a stack. 1342 // Windows and Plan 9 will layout sched stack on OS stack. 
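// malg(-1) creates a g0 without a Go-allocated stack; its stack bounds are
// filled in later from the OS/pthread stack in mstart. Otherwise g0 gets its
// own stack of 8192*sys.StackGuardMultiplier bytes.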
1343 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" { 1344 mp.g0 = malg(-1) 1345 } else { 1346 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1347 } 1348 mp.g0.m = mp 1349 1350 if _p_ == _g_.m.p.ptr() { 1351 releasep() 1352 } 1353 _g_.m.locks-- 1354 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1355 _g_.stackguard0 = stackPreempt 1356 } 1357 1358 return mp 1359 } 1360 1361 // needm is called when a cgo callback happens on a 1362 // thread without an m (a thread not created by Go). 1363 // In this case, needm is expected to find an m to use 1364 // and return with m, g initialized correctly. 1365 // Since m and g are not set now (likely nil, but see below) 1366 // needm is limited in what routines it can call. In particular 1367 // it can only call nosplit functions (textflag 7) and cannot 1368 // do any scheduling that requires an m. 1369 // 1370 // In order to avoid needing heavy lifting here, we adopt 1371 // the following strategy: there is a stack of available m's 1372 // that can be stolen. Using compare-and-swap 1373 // to pop from the stack has ABA races, so we simulate 1374 // a lock by doing an exchange (via casp) to steal the stack 1375 // head and replace the top pointer with MLOCKED (1). 1376 // This serves as a simple spin lock that we can use even 1377 // without an m. The thread that locks the stack in this way 1378 // unlocks the stack by storing a valid stack head pointer. 1379 // 1380 // In order to make sure that there is always an m structure 1381 // available to be stolen, we maintain the invariant that there 1382 // is always one more than needed. At the beginning of the 1383 // program (if cgo is in use) the list is seeded with a single m. 1384 // If needm finds that it has taken the last m off the list, its job 1385 // is - once it has installed its own m so that it can do things like 1386 // allocate memory - to create a spare m and put it on the list. 1387 // 1388 // Each of these extra m's also has a g0 and a curg that are 1389 // pressed into service as the scheduling stack and current 1390 // goroutine for the duration of the cgo callback. 1391 // 1392 // When the callback is done with the m, it calls dropm to 1393 // put the m back on the list. 1394 //go:nosplit 1395 func needm(x byte) { 1396 if iscgo && !cgoHasExtraM { 1397 // Can happen if C/C++ code calls Go from a global ctor. 1398 // Can not throw, because scheduler is not initialized yet. 1399 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1400 exit(1) 1401 } 1402 1403 // Lock extra list, take head, unlock popped list. 1404 // nilokay=false is safe here because of the invariant above, 1405 // that the extra list always contains or will soon contain 1406 // at least one m. 1407 mp := lockextra(false) 1408 1409 // Set needextram when we've just emptied the list, 1410 // so that the eventual call into cgocallbackg will 1411 // allocate a new m for the extra list. We delay the 1412 // allocation until then so that it can be done 1413 // after exitsyscall makes sure it is okay to be 1414 // running at all (that is, there's no garbage collection 1415 // running right now). 1416 mp.needextram = mp.schedlink == 0 1417 extraMCount-- 1418 unlockextra(mp.schedlink.ptr()) 1419 1420 // Save and block signals before installing g. 
1421 // Once g is installed, any incoming signals will try to execute, 1422 // but we won't have the sigaltstack settings and other data 1423 // set up appropriately until the end of minit, which will 1424 // unblock the signals. This is the same dance as when 1425 // starting a new m to run Go code via newosproc. 1426 msigsave(mp) 1427 sigblock() 1428 1429 // Install g (= m->g0) and set the stack bounds 1430 // to match the current stack. We don't actually know 1431 // how big the stack is, like we don't know how big any 1432 // scheduling stack is, but we assume there's at least 32 kB, 1433 // which is more than enough for us. 1434 setg(mp.g0) 1435 _g_ := getg() 1436 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 1437 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 1438 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1439 1440 // Initialize this thread to use the m. 1441 asminit() 1442 minit() 1443 1444 // mp.curg is now a real goroutine. 1445 casgstatus(mp.curg, _Gdead, _Gsyscall) 1446 atomic.Xadd(&sched.ngsys, -1) 1447 } 1448 1449 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1450 1451 // newextram allocates m's and puts them on the extra list. 1452 // It is called with a working local m, so that it can do things 1453 // like call schedlock and allocate. 1454 func newextram() { 1455 c := atomic.Xchg(&extraMWaiters, 0) 1456 if c > 0 { 1457 for i := uint32(0); i < c; i++ { 1458 oneNewExtraM() 1459 } 1460 } else { 1461 // Make sure there is at least one extra M. 1462 mp := lockextra(true) 1463 unlockextra(mp) 1464 if mp == nil { 1465 oneNewExtraM() 1466 } 1467 } 1468 } 1469 1470 // oneNewExtraM allocates an m and puts it on the extra list. 1471 func oneNewExtraM() { 1472 // Create extra goroutine locked to extra m. 1473 // The goroutine is the context in which the cgo callback will run. 1474 // The sched.pc will never be returned to, but setting it to 1475 // goexit makes clear to the traceback routines where 1476 // the goroutine stack ends. 1477 mp := allocm(nil, nil) 1478 gp := malg(4096) 1479 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1480 gp.sched.sp = gp.stack.hi 1481 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1482 gp.sched.lr = 0 1483 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1484 gp.syscallpc = gp.sched.pc 1485 gp.syscallsp = gp.sched.sp 1486 gp.stktopsp = gp.sched.sp 1487 gp.gcscanvalid = true 1488 gp.gcscandone = true 1489 // malg returns status as _Gidle. Change to _Gdead before 1490 // adding to allg where GC can see it. We use _Gdead to hide 1491 // this from tracebacks and stack scans since it isn't a 1492 // "real" goroutine until needm grabs it. 1493 casgstatus(gp, _Gidle, _Gdead) 1494 gp.m = mp 1495 mp.curg = gp 1496 mp.locked = _LockInternal 1497 mp.lockedg = gp 1498 gp.lockedm = mp 1499 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1500 if raceenabled { 1501 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum) 1502 } 1503 // put on allg for garbage collector 1504 allgadd(gp) 1505 1506 // gp is now on the allg list, but we don't want it to be 1507 // counted by gcount. It would be more "proper" to increment 1508 // sched.ngfree, but that requires locking. Incrementing ngsys 1509 // has the same effect. 1510 atomic.Xadd(&sched.ngsys, +1) 1511 1512 // Add m to the extra list. 
1513 mnext := lockextra(true) 1514 mp.schedlink.set(mnext) 1515 extraMCount++ 1516 unlockextra(mp) 1517 } 1518 1519 // dropm is called when a cgo callback has called needm but is now 1520 // done with the callback and returning back into the non-Go thread. 1521 // It puts the current m back onto the extra list. 1522 // 1523 // The main expense here is the call to signalstack to release the 1524 // m's signal stack, and then the call to needm on the next callback 1525 // from this thread. It is tempting to try to save the m for next time, 1526 // which would eliminate both these costs, but there might not be 1527 // a next time: the current thread (which Go does not control) might exit. 1528 // If we saved the m for that thread, there would be an m leak each time 1529 // such a thread exited. Instead, we acquire and release an m on each 1530 // call. These should typically not be scheduling operations, just a few 1531 // atomics, so the cost should be small. 1532 // 1533 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1534 // variable using pthread_key_create. Unlike the pthread keys we already use 1535 // on OS X, this dummy key would never be read by Go code. It would exist 1536 // only so that we could register at thread-exit-time destructor. 1537 // That destructor would put the m back onto the extra list. 1538 // This is purely a performance optimization. The current version, 1539 // in which dropm happens on each cgo call, is still correct too. 1540 // We may have to keep the current version on systems with cgo 1541 // but without pthreads, like Windows. 1542 func dropm() { 1543 // Clear m and g, and return m to the extra list. 1544 // After the call to setg we can only call nosplit functions 1545 // with no pointer manipulation. 1546 mp := getg().m 1547 1548 // Return mp.curg to dead state. 1549 casgstatus(mp.curg, _Gsyscall, _Gdead) 1550 atomic.Xadd(&sched.ngsys, +1) 1551 1552 // Block signals before unminit. 1553 // Unminit unregisters the signal handling stack (but needs g on some systems). 1554 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1555 // It's important not to try to handle a signal between those two steps. 1556 sigmask := mp.sigmask 1557 sigblock() 1558 unminit() 1559 1560 mnext := lockextra(true) 1561 extraMCount++ 1562 mp.schedlink.set(mnext) 1563 1564 setg(nil) 1565 1566 // Commit the release of mp. 1567 unlockextra(mp) 1568 1569 msigrestore(sigmask) 1570 } 1571 1572 // A helper function for EnsureDropM. 1573 func getm() uintptr { 1574 return uintptr(unsafe.Pointer(getg().m)) 1575 } 1576 1577 var extram uintptr 1578 var extraMCount uint32 // Protected by lockextra 1579 var extraMWaiters uint32 1580 1581 // lockextra locks the extra list and returns the list head. 1582 // The caller must unlock the list by storing a new list head 1583 // to extram. If nilokay is true, then lockextra will 1584 // return a nil list head if that's what it finds. If nilokay is false, 1585 // lockextra will keep waiting until the list head is no longer nil. 1586 //go:nosplit 1587 func lockextra(nilokay bool) *m { 1588 const locked = 1 1589 1590 incr := false 1591 for { 1592 old := atomic.Loaduintptr(&extram) 1593 if old == locked { 1594 yield := osyield 1595 yield() 1596 continue 1597 } 1598 if old == 0 && !nilokay { 1599 if !incr { 1600 // Add 1 to the number of threads 1601 // waiting for an M. 1602 // This is cleared by newextram. 
1603 atomic.Xadd(&extraMWaiters, 1) 1604 incr = true 1605 } 1606 usleep(1) 1607 continue 1608 } 1609 if atomic.Casuintptr(&extram, old, locked) { 1610 return (*m)(unsafe.Pointer(old)) 1611 } 1612 yield := osyield 1613 yield() 1614 continue 1615 } 1616 } 1617 1618 //go:nosplit 1619 func unlockextra(mp *m) { 1620 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1621 } 1622 1623 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 1624 // around exec'ing while creating/destroying threads. See issue #19546. 1625 var execLock rwmutex 1626 1627 // Create a new m. It will start off with a call to fn, or else the scheduler. 1628 // fn needs to be static and not a heap allocated closure. 1629 // May run with m.p==nil, so write barriers are not allowed. 1630 //go:nowritebarrierrec 1631 func newm(fn func(), _p_ *p) { 1632 mp := allocm(_p_, fn) 1633 mp.nextp.set(_p_) 1634 mp.sigmask = initSigmask 1635 if iscgo { 1636 var ts cgothreadstart 1637 if _cgo_thread_start == nil { 1638 throw("_cgo_thread_start missing") 1639 } 1640 ts.g.set(mp.g0) 1641 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1642 ts.fn = unsafe.Pointer(funcPC(mstart)) 1643 if msanenabled { 1644 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1645 } 1646 execLock.rlock() // Prevent process clone. 1647 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1648 execLock.runlock() 1649 return 1650 } 1651 execLock.rlock() // Prevent process clone. 1652 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi)) 1653 execLock.runlock() 1654 } 1655 1656 // Stops execution of the current m until new work is available. 1657 // Returns with acquired P. 1658 func stopm() { 1659 _g_ := getg() 1660 1661 if _g_.m.locks != 0 { 1662 throw("stopm holding locks") 1663 } 1664 if _g_.m.p != 0 { 1665 throw("stopm holding p") 1666 } 1667 if _g_.m.spinning { 1668 throw("stopm spinning") 1669 } 1670 1671 retry: 1672 lock(&sched.lock) 1673 mput(_g_.m) 1674 unlock(&sched.lock) 1675 notesleep(&_g_.m.park) 1676 noteclear(&_g_.m.park) 1677 if _g_.m.helpgc != 0 { 1678 gchelper() 1679 _g_.m.helpgc = 0 1680 _g_.m.mcache = nil 1681 _g_.m.p = 0 1682 goto retry 1683 } 1684 acquirep(_g_.m.nextp.ptr()) 1685 _g_.m.nextp = 0 1686 } 1687 1688 func mspinning() { 1689 // startm's caller incremented nmspinning. Set the new M's spinning. 1690 getg().m.spinning = true 1691 } 1692 1693 // Schedules some M to run the p (creates an M if necessary). 1694 // If p==nil, tries to get an idle P, if no idle P's does nothing. 1695 // May run with m.p==nil, so write barriers are not allowed. 1696 // If spinning is set, the caller has incremented nmspinning and startm will 1697 // either decrement nmspinning or set m.spinning in the newly started M. 1698 //go:nowritebarrierrec 1699 func startm(_p_ *p, spinning bool) { 1700 lock(&sched.lock) 1701 if _p_ == nil { 1702 _p_ = pidleget() 1703 if _p_ == nil { 1704 unlock(&sched.lock) 1705 if spinning { 1706 // The caller incremented nmspinning, but there are no idle Ps, 1707 // so it's okay to just undo the increment and give up. 1708 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1709 throw("startm: negative nmspinning") 1710 } 1711 } 1712 return 1713 } 1714 } 1715 mp := mget() 1716 unlock(&sched.lock) 1717 if mp == nil { 1718 var fn func() 1719 if spinning { 1720 // The caller incremented nmspinning, so set m.spinning in the new M. 
1721 fn = mspinning 1722 } 1723 newm(fn, _p_) 1724 return 1725 } 1726 if mp.spinning { 1727 throw("startm: m is spinning") 1728 } 1729 if mp.nextp != 0 { 1730 throw("startm: m has p") 1731 } 1732 if spinning && !runqempty(_p_) { 1733 throw("startm: p has runnable gs") 1734 } 1735 // The caller incremented nmspinning, so set m.spinning in the new M. 1736 mp.spinning = spinning 1737 mp.nextp.set(_p_) 1738 notewakeup(&mp.park) 1739 } 1740 1741 // Hands off P from syscall or locked M. 1742 // Always runs without a P, so write barriers are not allowed. 1743 //go:nowritebarrierrec 1744 func handoffp(_p_ *p) { 1745 // handoffp must start an M in any situation where 1746 // findrunnable would return a G to run on _p_. 1747 1748 // if it has local work, start it straight away 1749 if !runqempty(_p_) || sched.runqsize != 0 { 1750 startm(_p_, false) 1751 return 1752 } 1753 // if it has GC work, start it straight away 1754 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 1755 startm(_p_, false) 1756 return 1757 } 1758 // no local work, check that there are no spinning/idle M's, 1759 // otherwise our help is not required 1760 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 1761 startm(_p_, true) 1762 return 1763 } 1764 lock(&sched.lock) 1765 if sched.gcwaiting != 0 { 1766 _p_.status = _Pgcstop 1767 sched.stopwait-- 1768 if sched.stopwait == 0 { 1769 notewakeup(&sched.stopnote) 1770 } 1771 unlock(&sched.lock) 1772 return 1773 } 1774 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 1775 sched.safePointFn(_p_) 1776 sched.safePointWait-- 1777 if sched.safePointWait == 0 { 1778 notewakeup(&sched.safePointNote) 1779 } 1780 } 1781 if sched.runqsize != 0 { 1782 unlock(&sched.lock) 1783 startm(_p_, false) 1784 return 1785 } 1786 // If this is the last running P and nobody is polling network, 1787 // need to wakeup another M to poll network. 1788 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 1789 unlock(&sched.lock) 1790 startm(_p_, false) 1791 return 1792 } 1793 pidleput(_p_) 1794 unlock(&sched.lock) 1795 } 1796 1797 // Tries to add one more P to execute G's. 1798 // Called when a G is made runnable (newproc, ready). 1799 func wakep() { 1800 // be conservative about spinning threads 1801 if !atomic.Cas(&sched.nmspinning, 0, 1) { 1802 return 1803 } 1804 startm(nil, true) 1805 } 1806 1807 // Stops execution of the current m that is locked to a g until the g is runnable again. 1808 // Returns with acquired P. 1809 func stoplockedm() { 1810 _g_ := getg() 1811 1812 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m { 1813 throw("stoplockedm: inconsistent locking") 1814 } 1815 if _g_.m.p != 0 { 1816 // Schedule another M to run this p. 1817 _p_ := releasep() 1818 handoffp(_p_) 1819 } 1820 incidlelocked(1) 1821 // Wait until another thread schedules lockedg again. 1822 notesleep(&_g_.m.park) 1823 noteclear(&_g_.m.park) 1824 status := readgstatus(_g_.m.lockedg) 1825 if status&^_Gscan != _Grunnable { 1826 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 1827 dumpgstatus(_g_) 1828 throw("stoplockedm: not runnable") 1829 } 1830 acquirep(_g_.m.nextp.ptr()) 1831 _g_.m.nextp = 0 1832 } 1833 1834 // Schedules the locked m to run the locked gp. 1835 // May run during STW, so write barriers are not allowed. 
1836 //go:nowritebarrierrec 1837 func startlockedm(gp *g) { 1838 _g_ := getg() 1839 1840 mp := gp.lockedm 1841 if mp == _g_.m { 1842 throw("startlockedm: locked to me") 1843 } 1844 if mp.nextp != 0 { 1845 throw("startlockedm: m has p") 1846 } 1847 // directly handoff current P to the locked m 1848 incidlelocked(-1) 1849 _p_ := releasep() 1850 mp.nextp.set(_p_) 1851 notewakeup(&mp.park) 1852 stopm() 1853 } 1854 1855 // Stops the current m for stopTheWorld. 1856 // Returns when the world is restarted. 1857 func gcstopm() { 1858 _g_ := getg() 1859 1860 if sched.gcwaiting == 0 { 1861 throw("gcstopm: not waiting for gc") 1862 } 1863 if _g_.m.spinning { 1864 _g_.m.spinning = false 1865 // OK to just drop nmspinning here, 1866 // startTheWorld will unpark threads as necessary. 1867 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1868 throw("gcstopm: negative nmspinning") 1869 } 1870 } 1871 _p_ := releasep() 1872 lock(&sched.lock) 1873 _p_.status = _Pgcstop 1874 sched.stopwait-- 1875 if sched.stopwait == 0 { 1876 notewakeup(&sched.stopnote) 1877 } 1878 unlock(&sched.lock) 1879 stopm() 1880 } 1881 1882 // Schedules gp to run on the current M. 1883 // If inheritTime is true, gp inherits the remaining time in the 1884 // current time slice. Otherwise, it starts a new time slice. 1885 // Never returns. 1886 // 1887 // Write barriers are allowed because this is called immediately after 1888 // acquiring a P in several places. 1889 // 1890 //go:yeswritebarrierrec 1891 func execute(gp *g, inheritTime bool) { 1892 _g_ := getg() 1893 1894 casgstatus(gp, _Grunnable, _Grunning) 1895 gp.waitsince = 0 1896 gp.preempt = false 1897 gp.stackguard0 = gp.stack.lo + _StackGuard 1898 if !inheritTime { 1899 _g_.m.p.ptr().schedtick++ 1900 } 1901 _g_.m.curg = gp 1902 gp.m = _g_.m 1903 1904 // Check whether the profiler needs to be turned on or off. 1905 hz := sched.profilehz 1906 if _g_.m.profilehz != hz { 1907 setThreadCPUProfiler(hz) 1908 } 1909 1910 if trace.enabled { 1911 // GoSysExit has to happen when we have a P, but before GoStart. 1912 // So we emit it here. 1913 if gp.syscallsp != 0 && gp.sysblocktraced { 1914 traceGoSysExit(gp.sysexitticks) 1915 } 1916 traceGoStart() 1917 } 1918 1919 gogo(&gp.sched) 1920 } 1921 1922 // Finds a runnable goroutine to execute. 1923 // Tries to steal from other P's, get g from global queue, poll network. 1924 func findrunnable() (gp *g, inheritTime bool) { 1925 _g_ := getg() 1926 1927 // The conditions here and in handoffp must agree: if 1928 // findrunnable would return a G to run, handoffp must start 1929 // an M. 1930 1931 top: 1932 _p_ := _g_.m.p.ptr() 1933 if sched.gcwaiting != 0 { 1934 gcstopm() 1935 goto top 1936 } 1937 if _p_.runSafePointFn != 0 { 1938 runSafePointFn() 1939 } 1940 if fingwait && fingwake { 1941 if gp := wakefing(); gp != nil { 1942 ready(gp, 0, true) 1943 } 1944 } 1945 if *cgo_yield != nil { 1946 asmcgocall(*cgo_yield, nil) 1947 } 1948 1949 // local runq 1950 if gp, inheritTime := runqget(_p_); gp != nil { 1951 return gp, inheritTime 1952 } 1953 1954 // global runq 1955 if sched.runqsize != 0 { 1956 lock(&sched.lock) 1957 gp := globrunqget(_p_, 0) 1958 unlock(&sched.lock) 1959 if gp != nil { 1960 return gp, false 1961 } 1962 } 1963 1964 // Poll network. 1965 // This netpoll is only an optimization before we resort to stealing. 1966 // We can safely skip it if there a thread blocked in netpoll already. 1967 // If there is any kind of logical race with that blocked thread 1968 // (e.g. 
it has already returned from netpoll, but does not set lastpoll yet), 1969 // this thread will do blocking netpoll below anyway. 1970 if netpollinited() && sched.lastpoll != 0 { 1971 if gp := netpoll(false); gp != nil { // non-blocking 1972 // netpoll returns list of goroutines linked by schedlink. 1973 injectglist(gp.schedlink.ptr()) 1974 casgstatus(gp, _Gwaiting, _Grunnable) 1975 if trace.enabled { 1976 traceGoUnpark(gp, 0) 1977 } 1978 return gp, false 1979 } 1980 } 1981 1982 // Steal work from other P's. 1983 procs := uint32(gomaxprocs) 1984 if atomic.Load(&sched.npidle) == procs-1 { 1985 // Either GOMAXPROCS=1 or everybody, except for us, is idle already. 1986 // New work can appear from returning syscall/cgocall, network or timers. 1987 // Neither of that submits to local run queues, so no point in stealing. 1988 goto stop 1989 } 1990 // If number of spinning M's >= number of busy P's, block. 1991 // This is necessary to prevent excessive CPU consumption 1992 // when GOMAXPROCS>>1 but the program parallelism is low. 1993 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { 1994 goto stop 1995 } 1996 if !_g_.m.spinning { 1997 _g_.m.spinning = true 1998 atomic.Xadd(&sched.nmspinning, 1) 1999 } 2000 for i := 0; i < 4; i++ { 2001 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { 2002 if sched.gcwaiting != 0 { 2003 goto top 2004 } 2005 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 2006 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil { 2007 return gp, false 2008 } 2009 } 2010 } 2011 2012 stop: 2013 2014 // We have nothing to do. If we're in the GC mark phase, can 2015 // safely scan and blacken objects, and have work to do, run 2016 // idle-time marking rather than give up the P. 2017 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 2018 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2019 gp := _p_.gcBgMarkWorker.ptr() 2020 casgstatus(gp, _Gwaiting, _Grunnable) 2021 if trace.enabled { 2022 traceGoUnpark(gp, 0) 2023 } 2024 return gp, false 2025 } 2026 2027 // return P and block 2028 lock(&sched.lock) 2029 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2030 unlock(&sched.lock) 2031 goto top 2032 } 2033 if sched.runqsize != 0 { 2034 gp := globrunqget(_p_, 0) 2035 unlock(&sched.lock) 2036 return gp, false 2037 } 2038 if releasep() != _p_ { 2039 throw("findrunnable: wrong p") 2040 } 2041 pidleput(_p_) 2042 unlock(&sched.lock) 2043 2044 // Delicate dance: thread transitions from spinning to non-spinning state, 2045 // potentially concurrently with submission of new goroutines. We must 2046 // drop nmspinning first and then check all per-P queues again (with 2047 // #StoreLoad memory barrier in between). If we do it the other way around, 2048 // another thread can submit a goroutine after we've checked all run queues 2049 // but before we drop nmspinning; as the result nobody will unpark a thread 2050 // to run the goroutine. 2051 // If we discover new work below, we need to restore m.spinning as a signal 2052 // for resetspinning to unpark a new worker thread (because there can be more 2053 // than one starving goroutine). However, if after discovering new work 2054 // we also observe no idle Ps, it is OK to just park the current thread: 2055 // the system is fully loaded so no spinning threads are required. 2056 // Also see "Worker thread parking/unparking" comment at the top of the file. 
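
// Editor's note (illustrative sketch, not part of this file): the two
// orderings described in the comment above can be mimicked with the public
// sync/atomic package, whose operations are sequentially consistent with
// one another and so provide the #StoreLoad-style ordering the runtime
// needs. queue, nmspinning, submit and park below are invented stand-ins,
// not the runtime's data structures.
package main

import (
	"fmt"
	"sync/atomic"
)

var (
	queue      int32 // pending work items (stands in for the per-P run queues)
	nmspinning int32 // threads still actively looking for work
	wakeups    int32 // times the producer had to unpark a worker itself
)

// submit is the readying side: publish the work first, then check whether
// anyone is still spinning; if not, the producer must do the unpark.
func submit() {
	atomic.AddInt32(&queue, 1)              // store: work is now visible
	if atomic.LoadInt32(&nmspinning) == 0 { // load: anyone still looking?
		atomic.AddInt32(&wakeups, 1) // here the runtime calls startm(nil, true)
	}
}

// park is the spinning->non-spinning side: drop out of the spinning count
// first, then re-check the queue. In the opposite order a producer could
// publish work after our last check but before our decrement, see
// nmspinning > 0, skip the unpark, and the work would sit unnoticed.
func park() bool {
	atomic.AddInt32(&nmspinning, -1)  // store: no longer looking
	if atomic.LoadInt32(&queue) > 0 { // load: did anything arrive meanwhile?
		atomic.AddInt32(&nmspinning, 1) // become spinning again and keep running
		return false                    // don't park after all
	}
	return true // genuinely idle; safe to park
}

func main() {
	atomic.StoreInt32(&nmspinning, 1) // one worker starts out spinning
	submit()                          // work arrives while it is deciding to park
	fmt.Println("parked:", park(), "producer-side wakeups:", wakeups)
}
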
2057 wasSpinning := _g_.m.spinning 2058 if _g_.m.spinning { 2059 _g_.m.spinning = false 2060 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2061 throw("findrunnable: negative nmspinning") 2062 } 2063 } 2064 2065 // check all runqueues once again 2066 for i := 0; i < int(gomaxprocs); i++ { 2067 _p_ := allp[i] 2068 if _p_ != nil && !runqempty(_p_) { 2069 lock(&sched.lock) 2070 _p_ = pidleget() 2071 unlock(&sched.lock) 2072 if _p_ != nil { 2073 acquirep(_p_) 2074 if wasSpinning { 2075 _g_.m.spinning = true 2076 atomic.Xadd(&sched.nmspinning, 1) 2077 } 2078 goto top 2079 } 2080 break 2081 } 2082 } 2083 2084 // Check for idle-priority GC work again. 2085 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2086 lock(&sched.lock) 2087 _p_ = pidleget() 2088 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2089 pidleput(_p_) 2090 _p_ = nil 2091 } 2092 unlock(&sched.lock) 2093 if _p_ != nil { 2094 acquirep(_p_) 2095 if wasSpinning { 2096 _g_.m.spinning = true 2097 atomic.Xadd(&sched.nmspinning, 1) 2098 } 2099 // Go back to idle GC check. 2100 goto stop 2101 } 2102 } 2103 2104 // poll network 2105 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2106 if _g_.m.p != 0 { 2107 throw("findrunnable: netpoll with p") 2108 } 2109 if _g_.m.spinning { 2110 throw("findrunnable: netpoll with spinning") 2111 } 2112 gp := netpoll(true) // block until new work is available 2113 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2114 if gp != nil { 2115 lock(&sched.lock) 2116 _p_ = pidleget() 2117 unlock(&sched.lock) 2118 if _p_ != nil { 2119 acquirep(_p_) 2120 injectglist(gp.schedlink.ptr()) 2121 casgstatus(gp, _Gwaiting, _Grunnable) 2122 if trace.enabled { 2123 traceGoUnpark(gp, 0) 2124 } 2125 return gp, false 2126 } 2127 injectglist(gp) 2128 } 2129 } 2130 stopm() 2131 goto top 2132 } 2133 2134 // pollWork returns true if there is non-background work this P could 2135 // be doing. This is a fairly lightweight check to be used for 2136 // background work loops, like idle GC. It checks a subset of the 2137 // conditions checked by the actual scheduler. 2138 func pollWork() bool { 2139 if sched.runqsize != 0 { 2140 return true 2141 } 2142 p := getg().m.p.ptr() 2143 if !runqempty(p) { 2144 return true 2145 } 2146 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2147 if gp := netpoll(false); gp != nil { 2148 injectglist(gp) 2149 return true 2150 } 2151 } 2152 return false 2153 } 2154 2155 func resetspinning() { 2156 _g_ := getg() 2157 if !_g_.m.spinning { 2158 throw("resetspinning: not a spinning m") 2159 } 2160 _g_.m.spinning = false 2161 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2162 if int32(nmspinning) < 0 { 2163 throw("findrunnable: negative nmspinning") 2164 } 2165 // M wakeup policy is deliberately somewhat conservative, so check if we 2166 // need to wakeup another P here. See "Worker thread parking/unparking" 2167 // comment at the top of the file for details. 2168 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2169 wakep() 2170 } 2171 } 2172 2173 // Injects the list of runnable G's into the scheduler. 2174 // Can run concurrently with GC. 
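
// Editor's note (illustrative sketch, not part of this file): in the
// blocking netpoll above, sched.lastpoll doubles as a token. A thread
// atomically swaps it to zero and only proceeds to block in the poller if
// the old value was non-zero; it writes a fresh timestamp back when the
// poll returns. The sketch uses the public sync/atomic package; lastPoll,
// tryPoll and blockingPoll are invented names.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

var lastPoll int64 // non-zero: nobody is blocked in the poller right now

// blockingPoll stands in for netpoll(true); here it just sleeps briefly.
func blockingPoll() { time.Sleep(10 * time.Millisecond) }

func tryPoll(id int) {
	// Claim the token. Getting zero back means another goroutine already
	// holds it and is (or is about to be) blocked in the poller.
	if atomic.SwapInt64(&lastPoll, 0) == 0 {
		return
	}
	fmt.Println("goroutine", id, "is the blocking poller")
	blockingPoll()
	// Hand the token back with a fresh timestamp, as the runtime does
	// with nanotime() once netpoll returns.
	atomic.StoreInt64(&lastPoll, time.Now().UnixNano())
}

func main() {
	atomic.StoreInt64(&lastPoll, time.Now().UnixNano())
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) { defer wg.Done(); tryPoll(id) }(i)
	}
	wg.Wait() // only one goroutine at a time can hold the poller role
}
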
2175 func injectglist(glist *g) { 2176 if glist == nil { 2177 return 2178 } 2179 if trace.enabled { 2180 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2181 traceGoUnpark(gp, 0) 2182 } 2183 } 2184 lock(&sched.lock) 2185 var n int 2186 for n = 0; glist != nil; n++ { 2187 gp := glist 2188 glist = gp.schedlink.ptr() 2189 casgstatus(gp, _Gwaiting, _Grunnable) 2190 globrunqput(gp) 2191 } 2192 unlock(&sched.lock) 2193 for ; n != 0 && sched.npidle != 0; n-- { 2194 startm(nil, false) 2195 } 2196 } 2197 2198 // One round of scheduler: find a runnable goroutine and execute it. 2199 // Never returns. 2200 func schedule() { 2201 _g_ := getg() 2202 2203 if _g_.m.locks != 0 { 2204 throw("schedule: holding locks") 2205 } 2206 2207 if _g_.m.lockedg != nil { 2208 stoplockedm() 2209 execute(_g_.m.lockedg, false) // Never returns. 2210 } 2211 2212 top: 2213 if sched.gcwaiting != 0 { 2214 gcstopm() 2215 goto top 2216 } 2217 if _g_.m.p.ptr().runSafePointFn != 0 { 2218 runSafePointFn() 2219 } 2220 2221 var gp *g 2222 var inheritTime bool 2223 if trace.enabled || trace.shutdown { 2224 gp = traceReader() 2225 if gp != nil { 2226 casgstatus(gp, _Gwaiting, _Grunnable) 2227 traceGoUnpark(gp, 0) 2228 } 2229 } 2230 if gp == nil && gcBlackenEnabled != 0 { 2231 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2232 } 2233 if gp == nil { 2234 // Check the global runnable queue once in a while to ensure fairness. 2235 // Otherwise two goroutines can completely occupy the local runqueue 2236 // by constantly respawning each other. 2237 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2238 lock(&sched.lock) 2239 gp = globrunqget(_g_.m.p.ptr(), 1) 2240 unlock(&sched.lock) 2241 } 2242 } 2243 if gp == nil { 2244 gp, inheritTime = runqget(_g_.m.p.ptr()) 2245 if gp != nil && _g_.m.spinning { 2246 throw("schedule: spinning with local work") 2247 } 2248 } 2249 if gp == nil { 2250 gp, inheritTime = findrunnable() // blocks until work is available 2251 } 2252 2253 // This thread is going to run a goroutine and is not spinning anymore, 2254 // so if it was marked as spinning we need to reset it now and potentially 2255 // start a new spinning M. 2256 if _g_.m.spinning { 2257 resetspinning() 2258 } 2259 2260 if gp.lockedm != nil { 2261 // Hands off own p to the locked m, 2262 // then blocks waiting for a new p. 2263 startlockedm(gp) 2264 goto top 2265 } 2266 2267 execute(gp, inheritTime) 2268 } 2269 2270 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2271 // Typically a caller sets gp's status away from Grunning and then 2272 // immediately calls dropg to finish the job. The caller is also responsible 2273 // for arranging that gp will be restarted using ready at an 2274 // appropriate time. After calling dropg and arranging for gp to be 2275 // readied later, the caller can do other work but eventually should 2276 // call schedule to restart the scheduling of goroutines on this m. 2277 func dropg() { 2278 _g_ := getg() 2279 2280 setMNoWB(&_g_.m.curg.m, nil) 2281 setGNoWB(&_g_.m.curg, nil) 2282 } 2283 2284 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2285 unlock((*mutex)(lock)) 2286 return true 2287 } 2288 2289 // park continuation on g0. 
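
// Editor's note (illustrative sketch, not part of this file): the
// schedtick%61 check in schedule above keeps a busy local run queue from
// starving the global one. The toy dispatcher below prefers its local
// queue but looks at the shared queue on every 61st tick, so the globally
// queued job still runs even though two local jobs keep re-queueing each
// other forever. localQ, globalQ and tick are invented for the sketch.
package main

import "fmt"

func main() {
	localQ := []string{"ping", "pong"} // these two keep readying each other
	globalQ := []string{"global job"}

	for tick := 1; tick <= 200; tick++ {
		// Once in a while consult the shared queue first, as the scheduler
		// does when schedtick%61 == 0.
		if tick%61 == 0 && len(globalQ) > 0 {
			job := globalQ[0]
			globalQ = globalQ[1:]
			fmt.Printf("tick %d: ran %q from the global queue\n", tick, job)
			continue
		}
		// Otherwise run the head of the local queue and re-queue it,
		// mimicking two goroutines that constantly respawn each other.
		job := localQ[0]
		localQ = append(localQ[1:], job)
	}
}
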
2290 func park_m(gp *g) { 2291 _g_ := getg() 2292 2293 if trace.enabled { 2294 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2295 } 2296 2297 casgstatus(gp, _Grunning, _Gwaiting) 2298 dropg() 2299 2300 if _g_.m.waitunlockf != nil { 2301 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2302 ok := fn(gp, _g_.m.waitlock) 2303 _g_.m.waitunlockf = nil 2304 _g_.m.waitlock = nil 2305 if !ok { 2306 if trace.enabled { 2307 traceGoUnpark(gp, 2) 2308 } 2309 casgstatus(gp, _Gwaiting, _Grunnable) 2310 execute(gp, true) // Schedule it back, never returns. 2311 } 2312 } 2313 schedule() 2314 } 2315 2316 func goschedImpl(gp *g) { 2317 status := readgstatus(gp) 2318 if status&^_Gscan != _Grunning { 2319 dumpgstatus(gp) 2320 throw("bad g status") 2321 } 2322 casgstatus(gp, _Grunning, _Grunnable) 2323 dropg() 2324 lock(&sched.lock) 2325 globrunqput(gp) 2326 unlock(&sched.lock) 2327 2328 schedule() 2329 } 2330 2331 // Gosched continuation on g0. 2332 func gosched_m(gp *g) { 2333 if trace.enabled { 2334 traceGoSched() 2335 } 2336 goschedImpl(gp) 2337 } 2338 2339 // goschedguarded is a forbidden-states-avoided version of gosched_m 2340 func goschedguarded_m(gp *g) { 2341 2342 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning { 2343 gogo(&gp.sched) // never return 2344 } 2345 2346 if trace.enabled { 2347 traceGoSched() 2348 } 2349 goschedImpl(gp) 2350 } 2351 2352 func gopreempt_m(gp *g) { 2353 if trace.enabled { 2354 traceGoPreempt() 2355 } 2356 goschedImpl(gp) 2357 } 2358 2359 // Finishes execution of the current goroutine. 2360 func goexit1() { 2361 if raceenabled { 2362 racegoend() 2363 } 2364 if trace.enabled { 2365 traceGoEnd() 2366 } 2367 mcall(goexit0) 2368 } 2369 2370 // goexit continuation on g0. 2371 func goexit0(gp *g) { 2372 _g_ := getg() 2373 2374 casgstatus(gp, _Grunning, _Gdead) 2375 if isSystemGoroutine(gp) { 2376 atomic.Xadd(&sched.ngsys, -1) 2377 } 2378 gp.m = nil 2379 gp.lockedm = nil 2380 _g_.m.lockedg = nil 2381 gp.paniconfault = false 2382 gp._defer = nil // should be true already but just in case. 2383 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2384 gp.writebuf = nil 2385 gp.waitreason = "" 2386 gp.param = nil 2387 gp.labels = nil 2388 gp.timer = nil 2389 2390 // Note that gp's stack scan is now "valid" because it has no 2391 // stack. 2392 gp.gcscanvalid = true 2393 dropg() 2394 2395 if _g_.m.locked&^_LockExternal != 0 { 2396 print("invalid m->locked = ", _g_.m.locked, "\n") 2397 throw("internal lockOSThread error") 2398 } 2399 _g_.m.locked = 0 2400 gfput(_g_.m.p.ptr(), gp) 2401 schedule() 2402 } 2403 2404 // save updates getg().sched to refer to pc and sp so that a following 2405 // gogo will restore pc and sp. 2406 // 2407 // save must not have write barriers because invoking a write barrier 2408 // can clobber getg().sched. 2409 // 2410 //go:nosplit 2411 //go:nowritebarrierrec 2412 func save(pc, sp uintptr) { 2413 _g_ := getg() 2414 2415 _g_.sched.pc = pc 2416 _g_.sched.sp = sp 2417 _g_.sched.lr = 0 2418 _g_.sched.ret = 0 2419 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2420 // We need to ensure ctxt is zero, but can't have a write 2421 // barrier here. However, it should always already be zero. 2422 // Assert that. 2423 if _g_.sched.ctxt != nil { 2424 badctxt() 2425 } 2426 } 2427 2428 // The goroutine g is about to enter a system call. 2429 // Record that it's not using the cpu anymore. 
2430 // This is called only from the go syscall library and cgocall, 2431 // not from the low-level system calls used by the runtime. 2432 // 2433 // Entersyscall cannot split the stack: the gosave must 2434 // make g->sched refer to the caller's stack segment, because 2435 // entersyscall is going to return immediately after. 2436 // 2437 // Nothing entersyscall calls can split the stack either. 2438 // We cannot safely move the stack during an active call to syscall, 2439 // because we do not know which of the uintptr arguments are 2440 // really pointers (back into the stack). 2441 // In practice, this means that we make the fast path run through 2442 // entersyscall doing no-split things, and the slow path has to use systemstack 2443 // to run bigger things on the system stack. 2444 // 2445 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2446 // saved SP and PC are restored. This is needed when exitsyscall will be called 2447 // from a function further up in the call stack than the parent, as g->syscallsp 2448 // must always point to a valid stack frame. entersyscall below is the normal 2449 // entry point for syscalls, which obtains the SP and PC from the caller. 2450 // 2451 // Syscall tracing: 2452 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2453 // If the syscall does not block, that is it, we do not emit any other events. 2454 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2455 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2456 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2457 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2458 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2459 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2460 // and we wait for the increment before emitting traceGoSysExit. 2461 // Note that the increment is done even if tracing is not enabled, 2462 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2463 // 2464 //go:nosplit 2465 func reentersyscall(pc, sp uintptr) { 2466 _g_ := getg() 2467 2468 // Disable preemption because during this function g is in Gsyscall status, 2469 // but can have inconsistent g->sched, do not let GC observe it. 2470 _g_.m.locks++ 2471 2472 // Entersyscall must not call any function that might split/grow the stack. 2473 // (See details in comment above.) 2474 // Catch calls that might, by replacing the stack guard with something that 2475 // will trip any stack check and leaving a flag to tell newstack to die. 2476 _g_.stackguard0 = stackPreempt 2477 _g_.throwsplit = true 2478 2479 // Leave SP around for GC and traceback. 
2480 save(pc, sp) 2481 _g_.syscallsp = sp 2482 _g_.syscallpc = pc 2483 casgstatus(_g_, _Grunning, _Gsyscall) 2484 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2485 systemstack(func() { 2486 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2487 throw("entersyscall") 2488 }) 2489 } 2490 2491 if trace.enabled { 2492 systemstack(traceGoSysCall) 2493 // systemstack itself clobbers g.sched.{pc,sp} and we might 2494 // need them later when the G is genuinely blocked in a 2495 // syscall 2496 save(pc, sp) 2497 } 2498 2499 if atomic.Load(&sched.sysmonwait) != 0 { 2500 systemstack(entersyscall_sysmon) 2501 save(pc, sp) 2502 } 2503 2504 if _g_.m.p.ptr().runSafePointFn != 0 { 2505 // runSafePointFn may stack split if run on this stack 2506 systemstack(runSafePointFn) 2507 save(pc, sp) 2508 } 2509 2510 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2511 _g_.sysblocktraced = true 2512 _g_.m.mcache = nil 2513 _g_.m.p.ptr().m = 0 2514 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2515 if sched.gcwaiting != 0 { 2516 systemstack(entersyscall_gcwait) 2517 save(pc, sp) 2518 } 2519 2520 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2521 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2522 // Morestack detects this case and throws. 2523 _g_.stackguard0 = stackPreempt 2524 _g_.m.locks-- 2525 } 2526 2527 // Standard syscall entry used by the go syscall library and normal cgo calls. 2528 //go:nosplit 2529 func entersyscall(dummy int32) { 2530 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2531 } 2532 2533 func entersyscall_sysmon() { 2534 lock(&sched.lock) 2535 if atomic.Load(&sched.sysmonwait) != 0 { 2536 atomic.Store(&sched.sysmonwait, 0) 2537 notewakeup(&sched.sysmonnote) 2538 } 2539 unlock(&sched.lock) 2540 } 2541 2542 func entersyscall_gcwait() { 2543 _g_ := getg() 2544 _p_ := _g_.m.p.ptr() 2545 2546 lock(&sched.lock) 2547 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2548 if trace.enabled { 2549 traceGoSysBlock(_p_) 2550 traceProcStop(_p_) 2551 } 2552 _p_.syscalltick++ 2553 if sched.stopwait--; sched.stopwait == 0 { 2554 notewakeup(&sched.stopnote) 2555 } 2556 } 2557 unlock(&sched.lock) 2558 } 2559 2560 // The same as entersyscall(), but with a hint that the syscall is blocking. 2561 //go:nosplit 2562 func entersyscallblock(dummy int32) { 2563 _g_ := getg() 2564 2565 _g_.m.locks++ // see comment in entersyscall 2566 _g_.throwsplit = true 2567 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2568 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2569 _g_.sysblocktraced = true 2570 _g_.m.p.ptr().syscalltick++ 2571 2572 // Leave SP around for GC and traceback. 
2573 pc := getcallerpc(unsafe.Pointer(&dummy)) 2574 sp := getcallersp(unsafe.Pointer(&dummy)) 2575 save(pc, sp) 2576 _g_.syscallsp = _g_.sched.sp 2577 _g_.syscallpc = _g_.sched.pc 2578 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2579 sp1 := sp 2580 sp2 := _g_.sched.sp 2581 sp3 := _g_.syscallsp 2582 systemstack(func() { 2583 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2584 throw("entersyscallblock") 2585 }) 2586 } 2587 casgstatus(_g_, _Grunning, _Gsyscall) 2588 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2589 systemstack(func() { 2590 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2591 throw("entersyscallblock") 2592 }) 2593 } 2594 2595 systemstack(entersyscallblock_handoff) 2596 2597 // Resave for traceback during blocked call. 2598 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2599 2600 _g_.m.locks-- 2601 } 2602 2603 func entersyscallblock_handoff() { 2604 if trace.enabled { 2605 traceGoSysCall() 2606 traceGoSysBlock(getg().m.p.ptr()) 2607 } 2608 handoffp(releasep()) 2609 } 2610 2611 // The goroutine g exited its system call. 2612 // Arrange for it to run on a cpu again. 2613 // This is called only from the go syscall library, not 2614 // from the low-level system calls used by the runtime. 2615 // 2616 // Write barriers are not allowed because our P may have been stolen. 2617 // 2618 //go:nosplit 2619 //go:nowritebarrierrec 2620 func exitsyscall(dummy int32) { 2621 _g_ := getg() 2622 2623 _g_.m.locks++ // see comment in entersyscall 2624 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2625 // throw calls print which may try to grow the stack, 2626 // but throwsplit == true so the stack can not be grown; 2627 // use systemstack to avoid that possible problem. 2628 systemstack(func() { 2629 throw("exitsyscall: syscall frame is no longer valid") 2630 }) 2631 } 2632 2633 _g_.waitsince = 0 2634 oldp := _g_.m.p.ptr() 2635 if exitsyscallfast() { 2636 if _g_.m.mcache == nil { 2637 throw("lost mcache") 2638 } 2639 if trace.enabled { 2640 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2641 systemstack(traceGoStart) 2642 } 2643 } 2644 // There's a cpu for us, so we can run. 2645 _g_.m.p.ptr().syscalltick++ 2646 // We need to cas the status and scan before resuming... 2647 casgstatus(_g_, _Gsyscall, _Grunning) 2648 2649 // Garbage collector isn't running (since we are), 2650 // so okay to clear syscallsp. 2651 _g_.syscallsp = 0 2652 _g_.m.locks-- 2653 if _g_.preempt { 2654 // restore the preemption request in case we've cleared it in newstack 2655 _g_.stackguard0 = stackPreempt 2656 } else { 2657 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2658 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2659 } 2660 _g_.throwsplit = false 2661 return 2662 } 2663 2664 _g_.sysexitticks = 0 2665 if trace.enabled { 2666 // Wait till traceGoSysBlock event is emitted. 2667 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2668 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2669 osyield() 2670 } 2671 // We can't trace syscall exit right now because we don't have a P. 2672 // Tracing code can invoke write barriers that cannot run without a P. 
2673 // So instead we remember the syscall exit time and emit the event 2674 // in execute when we have a P. 2675 _g_.sysexitticks = cputicks() 2676 } 2677 2678 _g_.m.locks-- 2679 2680 // Call the scheduler. 2681 mcall(exitsyscall0) 2682 2683 if _g_.m.mcache == nil { 2684 throw("lost mcache") 2685 } 2686 2687 // Scheduler returned, so we're allowed to run now. 2688 // Delete the syscallsp information that we left for 2689 // the garbage collector during the system call. 2690 // Must wait until now because until gosched returns 2691 // we don't know for sure that the garbage collector 2692 // is not running. 2693 _g_.syscallsp = 0 2694 _g_.m.p.ptr().syscalltick++ 2695 _g_.throwsplit = false 2696 } 2697 2698 //go:nosplit 2699 func exitsyscallfast() bool { 2700 _g_ := getg() 2701 2702 // Freezetheworld sets stopwait but does not retake P's. 2703 if sched.stopwait == freezeStopWait { 2704 _g_.m.mcache = nil 2705 _g_.m.p = 0 2706 return false 2707 } 2708 2709 // Try to re-acquire the last P. 2710 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2711 // There's a cpu for us, so we can run. 2712 exitsyscallfast_reacquired() 2713 return true 2714 } 2715 2716 // Try to get any other idle P. 2717 oldp := _g_.m.p.ptr() 2718 _g_.m.mcache = nil 2719 _g_.m.p = 0 2720 if sched.pidle != 0 { 2721 var ok bool 2722 systemstack(func() { 2723 ok = exitsyscallfast_pidle() 2724 if ok && trace.enabled { 2725 if oldp != nil { 2726 // Wait till traceGoSysBlock event is emitted. 2727 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2728 for oldp.syscalltick == _g_.m.syscalltick { 2729 osyield() 2730 } 2731 } 2732 traceGoSysExit(0) 2733 } 2734 }) 2735 if ok { 2736 return true 2737 } 2738 } 2739 return false 2740 } 2741 2742 // exitsyscallfast_reacquired is the exitsyscall path on which this G 2743 // has successfully reacquired the P it was running on before the 2744 // syscall. 2745 // 2746 // This function is allowed to have write barriers because exitsyscall 2747 // has acquired a P at this point. 2748 // 2749 //go:yeswritebarrierrec 2750 //go:nosplit 2751 func exitsyscallfast_reacquired() { 2752 _g_ := getg() 2753 _g_.m.mcache = _g_.m.p.ptr().mcache 2754 _g_.m.p.ptr().m.set(_g_.m) 2755 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2756 if trace.enabled { 2757 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2758 // traceGoSysBlock for this syscall was already emitted, 2759 // but here we effectively retake the p from the new syscall running on the same p. 2760 systemstack(func() { 2761 // Denote blocking of the new syscall. 2762 traceGoSysBlock(_g_.m.p.ptr()) 2763 // Denote completion of the current syscall. 2764 traceGoSysExit(0) 2765 }) 2766 } 2767 _g_.m.p.ptr().syscalltick++ 2768 } 2769 } 2770 2771 func exitsyscallfast_pidle() bool { 2772 lock(&sched.lock) 2773 _p_ := pidleget() 2774 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 2775 atomic.Store(&sched.sysmonwait, 0) 2776 notewakeup(&sched.sysmonnote) 2777 } 2778 unlock(&sched.lock) 2779 if _p_ != nil { 2780 acquirep(_p_) 2781 return true 2782 } 2783 return false 2784 } 2785 2786 // exitsyscall slow path on g0. 2787 // Failed to acquire P, enqueue gp as runnable. 
2788 // 2789 //go:nowritebarrierrec 2790 func exitsyscall0(gp *g) { 2791 _g_ := getg() 2792 2793 casgstatus(gp, _Gsyscall, _Grunnable) 2794 dropg() 2795 lock(&sched.lock) 2796 _p_ := pidleget() 2797 if _p_ == nil { 2798 globrunqput(gp) 2799 } else if atomic.Load(&sched.sysmonwait) != 0 { 2800 atomic.Store(&sched.sysmonwait, 0) 2801 notewakeup(&sched.sysmonnote) 2802 } 2803 unlock(&sched.lock) 2804 if _p_ != nil { 2805 acquirep(_p_) 2806 execute(gp, false) // Never returns. 2807 } 2808 if _g_.m.lockedg != nil { 2809 // Wait until another thread schedules gp and so m again. 2810 stoplockedm() 2811 execute(gp, false) // Never returns. 2812 } 2813 stopm() 2814 schedule() // Never returns. 2815 } 2816 2817 func beforefork() { 2818 gp := getg().m.curg 2819 2820 // Block signals during a fork, so that the child does not run 2821 // a signal handler before exec if a signal is sent to the process 2822 // group. See issue #18600. 2823 gp.m.locks++ 2824 msigsave(gp.m) 2825 sigblock() 2826 2827 // This function is called before fork in syscall package. 2828 // Code between fork and exec must not allocate memory nor even try to grow stack. 2829 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2830 // runtime_AfterFork will undo this in parent process, but not in child. 2831 gp.stackguard0 = stackFork 2832 } 2833 2834 // Called from syscall package before fork. 2835 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2836 //go:nosplit 2837 func syscall_runtime_BeforeFork() { 2838 systemstack(beforefork) 2839 } 2840 2841 func afterfork() { 2842 gp := getg().m.curg 2843 2844 // See the comments in beforefork. 2845 gp.stackguard0 = gp.stack.lo + _StackGuard 2846 2847 msigrestore(gp.m.sigmask) 2848 2849 gp.m.locks-- 2850 } 2851 2852 // Called from syscall package after fork in parent. 2853 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2854 //go:nosplit 2855 func syscall_runtime_AfterFork() { 2856 systemstack(afterfork) 2857 } 2858 2859 // inForkedChild is true while manipulating signals in the child process. 2860 // This is used to avoid calling libc functions in case we are using vfork. 2861 var inForkedChild bool 2862 2863 // Called from syscall package after fork in child. 2864 // It resets non-sigignored signals to the default handler, and 2865 // restores the signal mask in preparation for the exec. 2866 // 2867 // Because this might be called during a vfork, and therefore may be 2868 // temporarily sharing address space with the parent process, this must 2869 // not change any global variables or calling into C code that may do so. 2870 // 2871 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 2872 //go:nosplit 2873 //go:nowritebarrierrec 2874 func syscall_runtime_AfterForkInChild() { 2875 // It's OK to change the global variable inForkedChild here 2876 // because we are going to change it back. There is no race here, 2877 // because if we are sharing address space with the parent process, 2878 // then the parent process can not be running concurrently. 2879 inForkedChild = true 2880 2881 clearSignalHandlers() 2882 2883 // When we are the child we are the only thread running, 2884 // so we know that nothing else has changed gp.m.sigmask. 2885 msigrestore(getg().m.sigmask) 2886 2887 inForkedChild = false 2888 } 2889 2890 // Called from syscall package before Exec. 
2891 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 2892 func syscall_runtime_BeforeExec() { 2893 // Prevent thread creation during exec. 2894 execLock.lock() 2895 } 2896 2897 // Called from syscall package after Exec. 2898 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 2899 func syscall_runtime_AfterExec() { 2900 execLock.unlock() 2901 } 2902 2903 // Allocate a new g, with a stack big enough for stacksize bytes. 2904 func malg(stacksize int32) *g { 2905 newg := new(g) 2906 if stacksize >= 0 { 2907 stacksize = round2(_StackSystem + stacksize) 2908 systemstack(func() { 2909 newg.stack = stackalloc(uint32(stacksize)) 2910 }) 2911 newg.stackguard0 = newg.stack.lo + _StackGuard 2912 newg.stackguard1 = ^uintptr(0) 2913 } 2914 return newg 2915 } 2916 2917 // Create a new g running fn with siz bytes of arguments. 2918 // Put it on the queue of g's waiting to run. 2919 // The compiler turns a go statement into a call to this. 2920 // Cannot split the stack because it assumes that the arguments 2921 // are available sequentially after &fn; they would not be 2922 // copied if a stack split occurred. 2923 //go:nosplit 2924 func newproc(siz int32, fn *funcval) { 2925 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2926 pc := getcallerpc(unsafe.Pointer(&siz)) 2927 systemstack(func() { 2928 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2929 }) 2930 } 2931 2932 // Create a new g running fn with narg bytes of arguments starting 2933 // at argp and returning nret bytes of results. callerpc is the 2934 // address of the go statement that created this. The new g is put 2935 // on the queue of g's waiting to run. 2936 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2937 _g_ := getg() 2938 2939 if fn == nil { 2940 _g_.m.throwing = -1 // do not dump full stacks 2941 throw("go of nil func value") 2942 } 2943 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2944 siz := narg + nret 2945 siz = (siz + 7) &^ 7 2946 2947 // We could allocate a larger initial stack if necessary. 2948 // Not worth it: this is almost always an error. 2949 // 4*sizeof(uintreg): extra space added below 2950 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2951 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2952 throw("newproc: function arguments too large for new goroutine") 2953 } 2954 2955 _p_ := _g_.m.p.ptr() 2956 newg := gfget(_p_) 2957 if newg == nil { 2958 newg = malg(_StackMin) 2959 casgstatus(newg, _Gidle, _Gdead) 2960 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2961 } 2962 if newg.stack.hi == 0 { 2963 throw("newproc1: newg missing stack") 2964 } 2965 2966 if readgstatus(newg) != _Gdead { 2967 throw("newproc1: new g is not Gdead") 2968 } 2969 2970 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2971 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2972 sp := newg.stack.hi - totalSize 2973 spArg := sp 2974 if usesLR { 2975 // caller's LR 2976 *(*uintptr)(unsafe.Pointer(sp)) = 0 2977 prepGoExitFrame(sp) 2978 spArg += sys.MinFrameSize 2979 } 2980 if narg > 0 { 2981 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2982 // This is a stack-to-stack copy. If write barriers 2983 // are enabled and the source stack is grey (the 2984 // destination is always black), then perform a 2985 // barrier copy. 
We do this *after* the memmove 2986 // because the destination stack may have garbage on 2987 // it. 2988 if writeBarrier.needed && !_g_.m.curg.gcscandone { 2989 f := findfunc(fn.fn) 2990 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 2991 // We're in the prologue, so it's always stack map index 0. 2992 bv := stackmapdata(stkmap, 0) 2993 bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata) 2994 } 2995 } 2996 2997 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2998 newg.sched.sp = sp 2999 newg.stktopsp = sp 3000 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 3001 newg.sched.g = guintptr(unsafe.Pointer(newg)) 3002 gostartcallfn(&newg.sched, fn) 3003 newg.gopc = callerpc 3004 newg.startpc = fn.fn 3005 if _g_.m.curg != nil { 3006 newg.labels = _g_.m.curg.labels 3007 } 3008 if isSystemGoroutine(newg) { 3009 atomic.Xadd(&sched.ngsys, +1) 3010 } 3011 newg.gcscanvalid = false 3012 casgstatus(newg, _Gdead, _Grunnable) 3013 3014 if _p_.goidcache == _p_.goidcacheend { 3015 // Sched.goidgen is the last allocated id, 3016 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 3017 // At startup sched.goidgen=0, so main goroutine receives goid=1. 3018 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 3019 _p_.goidcache -= _GoidCacheBatch - 1 3020 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 3021 } 3022 newg.goid = int64(_p_.goidcache) 3023 _p_.goidcache++ 3024 if raceenabled { 3025 newg.racectx = racegostart(callerpc) 3026 } 3027 if trace.enabled { 3028 traceGoCreate(newg, newg.startpc) 3029 } 3030 runqput(_p_, newg, true) 3031 3032 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 { 3033 wakep() 3034 } 3035 _g_.m.locks-- 3036 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 3037 _g_.stackguard0 = stackPreempt 3038 } 3039 return newg 3040 } 3041 3042 // Put on gfree list. 3043 // If local list is too long, transfer a batch to the global list. 3044 func gfput(_p_ *p, gp *g) { 3045 if readgstatus(gp) != _Gdead { 3046 throw("gfput: bad status (not Gdead)") 3047 } 3048 3049 stksize := gp.stack.hi - gp.stack.lo 3050 3051 if stksize != _FixedStack { 3052 // non-standard stack size - free it. 3053 stackfree(gp.stack) 3054 gp.stack.lo = 0 3055 gp.stack.hi = 0 3056 gp.stackguard0 = 0 3057 } 3058 3059 gp.schedlink.set(_p_.gfree) 3060 _p_.gfree = gp 3061 _p_.gfreecnt++ 3062 if _p_.gfreecnt >= 64 { 3063 lock(&sched.gflock) 3064 for _p_.gfreecnt >= 32 { 3065 _p_.gfreecnt-- 3066 gp = _p_.gfree 3067 _p_.gfree = gp.schedlink.ptr() 3068 if gp.stack.lo == 0 { 3069 gp.schedlink.set(sched.gfreeNoStack) 3070 sched.gfreeNoStack = gp 3071 } else { 3072 gp.schedlink.set(sched.gfreeStack) 3073 sched.gfreeStack = gp 3074 } 3075 sched.ngfree++ 3076 } 3077 unlock(&sched.gflock) 3078 } 3079 } 3080 3081 // Get from gfree list. 3082 // If local list is empty, grab a batch from global list. 3083 func gfget(_p_ *p) *g { 3084 retry: 3085 gp := _p_.gfree 3086 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 3087 lock(&sched.gflock) 3088 for _p_.gfreecnt < 32 { 3089 if sched.gfreeStack != nil { 3090 // Prefer Gs with stacks. 
3091 gp = sched.gfreeStack 3092 sched.gfreeStack = gp.schedlink.ptr() 3093 } else if sched.gfreeNoStack != nil { 3094 gp = sched.gfreeNoStack 3095 sched.gfreeNoStack = gp.schedlink.ptr() 3096 } else { 3097 break 3098 } 3099 _p_.gfreecnt++ 3100 sched.ngfree-- 3101 gp.schedlink.set(_p_.gfree) 3102 _p_.gfree = gp 3103 } 3104 unlock(&sched.gflock) 3105 goto retry 3106 } 3107 if gp != nil { 3108 _p_.gfree = gp.schedlink.ptr() 3109 _p_.gfreecnt-- 3110 if gp.stack.lo == 0 { 3111 // Stack was deallocated in gfput. Allocate a new one. 3112 systemstack(func() { 3113 gp.stack = stackalloc(_FixedStack) 3114 }) 3115 gp.stackguard0 = gp.stack.lo + _StackGuard 3116 } else { 3117 if raceenabled { 3118 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3119 } 3120 if msanenabled { 3121 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3122 } 3123 } 3124 } 3125 return gp 3126 } 3127 3128 // Purge all cached G's from gfree list to the global list. 3129 func gfpurge(_p_ *p) { 3130 lock(&sched.gflock) 3131 for _p_.gfreecnt != 0 { 3132 _p_.gfreecnt-- 3133 gp := _p_.gfree 3134 _p_.gfree = gp.schedlink.ptr() 3135 if gp.stack.lo == 0 { 3136 gp.schedlink.set(sched.gfreeNoStack) 3137 sched.gfreeNoStack = gp 3138 } else { 3139 gp.schedlink.set(sched.gfreeStack) 3140 sched.gfreeStack = gp 3141 } 3142 sched.ngfree++ 3143 } 3144 unlock(&sched.gflock) 3145 } 3146 3147 // Breakpoint executes a breakpoint trap. 3148 func Breakpoint() { 3149 breakpoint() 3150 } 3151 3152 // dolockOSThread is called by LockOSThread and lockOSThread below 3153 // after they modify m.locked. Do not allow preemption during this call, 3154 // or else the m might be different in this function than in the caller. 3155 //go:nosplit 3156 func dolockOSThread() { 3157 _g_ := getg() 3158 _g_.m.lockedg = _g_ 3159 _g_.lockedm = _g_.m 3160 } 3161 3162 //go:nosplit 3163 3164 // LockOSThread wires the calling goroutine to its current operating system thread. 3165 // Until the calling goroutine exits or calls UnlockOSThread, it will always 3166 // execute in that thread, and no other goroutine can. 3167 func LockOSThread() { 3168 getg().m.locked |= _LockExternal 3169 dolockOSThread() 3170 } 3171 3172 //go:nosplit 3173 func lockOSThread() { 3174 getg().m.locked += _LockInternal 3175 dolockOSThread() 3176 } 3177 3178 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 3179 // after they update m->locked. Do not allow preemption during this call, 3180 // or else the m might be in different in this function than in the caller. 3181 //go:nosplit 3182 func dounlockOSThread() { 3183 _g_ := getg() 3184 if _g_.m.locked != 0 { 3185 return 3186 } 3187 _g_.m.lockedg = nil 3188 _g_.lockedm = nil 3189 } 3190 3191 //go:nosplit 3192 3193 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 3194 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 
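
// Editor's note (usage example, not part of this file): LockOSThread and
// UnlockOSThread above are the exported entry points for code that must
// stay on one OS thread, e.g. libraries that keep per-thread state via cgo
// or OS APIs that care about thread identity. threadBoundWork is an
// invented name for the example.
package main

import (
	"fmt"
	"runtime"
)

func threadBoundWork() {
	// Wire this goroutine to its current OS thread. Until UnlockOSThread
	// is called (or the goroutine exits), it always runs on this thread
	// and no other goroutine runs on it.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// ... call an API that relies on thread identity here ...
	fmt.Println("running on a dedicated OS thread")
}

func main() {
	done := make(chan struct{})
	go func() {
		threadBoundWork()
		close(done)
	}()
	<-done
}
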
3195 func UnlockOSThread() { 3196 getg().m.locked &^= _LockExternal 3197 dounlockOSThread() 3198 } 3199 3200 //go:nosplit 3201 func unlockOSThread() { 3202 _g_ := getg() 3203 if _g_.m.locked < _LockInternal { 3204 systemstack(badunlockosthread) 3205 } 3206 _g_.m.locked -= _LockInternal 3207 dounlockOSThread() 3208 } 3209 3210 func badunlockosthread() { 3211 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 3212 } 3213 3214 func gcount() int32 { 3215 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 3216 for _, _p_ := range &allp { 3217 if _p_ == nil { 3218 break 3219 } 3220 n -= _p_.gfreecnt 3221 } 3222 3223 // All these variables can be changed concurrently, so the result can be inconsistent. 3224 // But at least the current goroutine is running. 3225 if n < 1 { 3226 n = 1 3227 } 3228 return n 3229 } 3230 3231 func mcount() int32 { 3232 return sched.mcount 3233 } 3234 3235 var prof struct { 3236 signalLock uint32 3237 hz int32 3238 } 3239 3240 func _System() { _System() } 3241 func _ExternalCode() { _ExternalCode() } 3242 func _LostExternalCode() { _LostExternalCode() } 3243 func _GC() { _GC() } 3244 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 3245 3246 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le} 3247 var lostAtomic64Count uint64 3248 3249 // Called if we receive a SIGPROF signal. 3250 // Called by the signal handler, may run during STW. 3251 //go:nowritebarrierrec 3252 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3253 if prof.hz == 0 { 3254 return 3255 } 3256 3257 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 3258 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 3259 // the critical section, it creates a deadlock (when writing the sample). 3260 // As a workaround, create a counter of SIGPROFs while in critical section 3261 // to store the count, and pass it to sigprof.add() later when SIGPROF is 3262 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 3263 if GOARCH == "mips" || GOARCH == "mipsle" { 3264 if f := findfunc(pc); f.valid() { 3265 if hasprefix(funcname(f), "runtime/internal/atomic") { 3266 lostAtomic64Count++ 3267 return 3268 } 3269 } 3270 } 3271 3272 // Profiling runs concurrently with GC, so it must not allocate. 3273 // Set a trap in case the code does allocate. 3274 // Note that on windows, one thread takes profiles of all the 3275 // other threads, so mp is usually not getg().m. 3276 // In fact mp may not even be stopped. 3277 // See golang.org/issue/17165. 3278 getg().m.mallocing++ 3279 3280 // Define that a "user g" is a user-created goroutine, and a "system g" 3281 // is one that is m->g0 or m->gsignal. 3282 // 3283 // We might be interrupted for profiling halfway through a 3284 // goroutine switch. The switch involves updating three (or four) values: 3285 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3286 // because once it gets updated the new g is running. 3287 // 3288 // When switching from a user g to a system g, LR is not considered live, 3289 // so the update only affects g, SP, and PC. Since PC must be last, there 3290 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3291 // (2) both g and SP are updated, and (3) SP alone is updated. 3292 // If SP or g alone is updated, we can detect the partial transition by checking 3293 // whether the SP is within g's stack bounds. 
(We could also require that SP 3294 // be changed only after g, but the stack bounds check is needed by other 3295 // cases, so there is no need to impose an additional requirement.) 3296 // 3297 // There is one exceptional transition to a system g, not in ordinary execution. 3298 // When a signal arrives, the operating system starts the signal handler running 3299 // with an updated PC and SP. The g is updated last, at the beginning of the 3300 // handler. There are two reasons this is okay. First, until g is updated the 3301 // g and SP do not match, so the stack bounds check detects the partial transition. 3302 // Second, signal handlers currently run with signals disabled, so a profiling 3303 // signal cannot arrive during the handler. 3304 // 3305 // When switching from a system g to a user g, there are three possibilities. 3306 // 3307 // First, it may be that the g switch has no PC update, because the SP 3308 // either corresponds to a user g throughout (as in asmcgocall) 3309 // or because it has been arranged to look like a user g frame 3310 // (as in cgocallback_gofunc). In this case, since the entire 3311 // transition is a g+SP update, a partial transition updating just one of 3312 // those will be detected by the stack bounds check. 3313 // 3314 // Second, when returning from a signal handler, the PC and SP updates 3315 // are performed by the operating system in an atomic update, so the g 3316 // update must be done before them. The stack bounds check detects 3317 // the partial transition here, and (again) signal handlers run with signals 3318 // disabled, so a profiling signal cannot arrive then anyway. 3319 // 3320 // Third, the common case: it may be that the switch updates g, SP, and PC 3321 // separately. If the PC is within any of the functions that does this, 3322 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3323 // 3324 // There is another apparently viable approach, recorded here in case 3325 // the "PC within setsSP function" check turns out not to be usable. 3326 // It would be possible to delay the update of either g or SP until immediately 3327 // before the PC update instruction. Then, because of the stack bounds check, 3328 // the only problematic interrupt point is just before that PC update instruction, 3329 // and the sigprof handler can detect that instruction and simulate stepping past 3330 // it in order to reach a consistent state. On ARM, the update of g must be made 3331 // in two places (in R10 and also in a TLS slot), so the delayed update would 3332 // need to be the SP update. The sigprof handler must read the instruction at 3333 // the current PC and if it was the known instruction (for example, JMP BX or 3334 // MOV R2, PC), use that other register in place of the PC value. 3335 // The biggest drawback to this solution is that it requires that we can tell 3336 // whether it's safe to read from the memory pointed at by PC. 3337 // In a correct program, we can test PC == nil and otherwise read, 3338 // but if a profiling signal happens at the instant that a program executes 3339 // a bad jump (before the program manages to handle the resulting fault) 3340 // the profiling handler could fault trying to read nonexistent memory. 3341 // 3342 // To recap, there are no constraints on the assembly being used for the 3343 // transition. We simply require that g and SP match and that the PC is not 3344 // in gogo. 
3345 traceback := true 3346 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3347 traceback = false 3348 } 3349 var stk [maxCPUProfStack]uintptr 3350 n := 0 3351 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3352 cgoOff := 0 3353 // Check cgoCallersUse to make sure that we are not 3354 // interrupting other code that is fiddling with 3355 // cgoCallers. We are running in a signal handler 3356 // with all signals blocked, so we don't have to worry 3357 // about any other code interrupting us. 3358 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3359 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3360 cgoOff++ 3361 } 3362 copy(stk[:], mp.cgoCallers[:cgoOff]) 3363 mp.cgoCallers[0] = 0 3364 } 3365 3366 // Collect Go stack that leads to the cgo call. 3367 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3368 } else if traceback { 3369 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3370 } 3371 3372 if n <= 0 { 3373 // Normal traceback is impossible or has failed. 3374 // See if it falls into several common cases. 3375 n = 0 3376 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3377 // Libcall, i.e. runtime syscall on windows. 3378 // Collect Go stack that leads to the call. 3379 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3380 } 3381 if n == 0 { 3382 // If all of the above has failed, account it against abstract "System" or "GC". 3383 n = 2 3384 // "ExternalCode" is better than "etext". 3385 if pc > firstmoduledata.etext { 3386 pc = funcPC(_ExternalCode) + sys.PCQuantum 3387 } 3388 stk[0] = pc 3389 if mp.preemptoff != "" || mp.helpgc != 0 { 3390 stk[1] = funcPC(_GC) + sys.PCQuantum 3391 } else { 3392 stk[1] = funcPC(_System) + sys.PCQuantum 3393 } 3394 } 3395 } 3396 3397 if prof.hz != 0 { 3398 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 { 3399 cpuprof.addLostAtomic64(lostAtomic64Count) 3400 lostAtomic64Count = 0 3401 } 3402 cpuprof.add(gp, stk[:n]) 3403 } 3404 getg().m.mallocing-- 3405 } 3406 3407 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3408 // it tries to collect a traceback into sigprofCallers. 3409 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3410 var sigprofCallers cgoCallers 3411 var sigprofCallersUse uint32 3412 3413 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3414 // and the signal handler collected a stack trace in sigprofCallers. 3415 // When this is called, sigprofCallersUse will be non-zero. 3416 // g is nil, and what we can do is very limited. 3417 //go:nosplit 3418 //go:nowritebarrierrec 3419 func sigprofNonGo() { 3420 if prof.hz != 0 { 3421 n := 0 3422 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3423 n++ 3424 } 3425 cpuprof.addNonGo(sigprofCallers[:n]) 3426 } 3427 3428 atomic.Store(&sigprofCallersUse, 0) 3429 } 3430 3431 // sigprofNonGoPC is called when a profiling signal arrived on a 3432 // non-Go thread and we have a single PC value, not a stack trace. 3433 // g is nil, and what we can do is very limited. 
3434 //go:nosplit 3435 //go:nowritebarrierrec 3436 func sigprofNonGoPC(pc uintptr) { 3437 if prof.hz != 0 { 3438 stk := []uintptr{ 3439 pc, 3440 funcPC(_ExternalCode) + sys.PCQuantum, 3441 } 3442 cpuprof.addNonGo(stk) 3443 } 3444 } 3445 3446 // Reports whether a function will set the SP 3447 // to an absolute value. Important that 3448 // we don't traceback when these are at the bottom 3449 // of the stack since we can't be sure that we will 3450 // find the caller. 3451 // 3452 // If the function is not on the bottom of the stack 3453 // we assume that it will have set it up so that traceback will be consistent, 3454 // either by being a traceback terminating function 3455 // or putting one on the stack at the right offset. 3456 func setsSP(pc uintptr) bool { 3457 f := findfunc(pc) 3458 if !f.valid() { 3459 // couldn't find the function for this PC, 3460 // so assume the worst and stop traceback 3461 return true 3462 } 3463 switch f.entry { 3464 case gogoPC, systemstackPC, mcallPC, morestackPC: 3465 return true 3466 } 3467 return false 3468 } 3469 3470 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3471 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3472 func setcpuprofilerate(hz int32) { 3473 // Force sane arguments. 3474 if hz < 0 { 3475 hz = 0 3476 } 3477 3478 // Disable preemption, otherwise we can be rescheduled to another thread 3479 // that has profiling enabled. 3480 _g_ := getg() 3481 _g_.m.locks++ 3482 3483 // Stop profiler on this thread so that it is safe to lock prof. 3484 // if a profiling signal came in while we had prof locked, 3485 // it would deadlock. 3486 setThreadCPUProfiler(0) 3487 3488 for !atomic.Cas(&prof.signalLock, 0, 1) { 3489 osyield() 3490 } 3491 if prof.hz != hz { 3492 setProcessCPUProfiler(hz) 3493 prof.hz = hz 3494 } 3495 atomic.Store(&prof.signalLock, 0) 3496 3497 lock(&sched.lock) 3498 sched.profilehz = hz 3499 unlock(&sched.lock) 3500 3501 if hz != 0 { 3502 setThreadCPUProfiler(hz) 3503 } 3504 3505 _g_.m.locks-- 3506 } 3507 3508 // Change number of processors. The world is stopped, sched is locked. 3509 // gcworkbufs are not being modified by either the GC or 3510 // the write barrier code. 3511 // Returns list of Ps with local work, they need to be scheduled by the caller. 
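
// Editor's note (usage example, not part of this file): the profiling
// machinery above is reached from user code through runtime/pprof.
// StartCPUProfile sets the sampling rate (via SetCPUProfileRate and
// setcpuprofilerate), after which each SIGPROF is turned into a sample by
// sigprof. The busywork function and the cpu.pprof file name are invented
// for the example.
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

// busywork burns some CPU so the profile has something to sample.
func busywork() {
	x := 0
	for i := 0; i < 1e8; i++ {
		x += i % 7
	}
	_ = x
}

func main() {
	f, err := os.Create("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Start sampling (the default rate is 100 Hz); stop before exit so the
	// profile is flushed to the file.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	busywork()
}
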
3512 func procresize(nprocs int32) *p { 3513 old := gomaxprocs 3514 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3515 throw("procresize: invalid arg") 3516 } 3517 if trace.enabled { 3518 traceGomaxprocs(nprocs) 3519 } 3520 3521 // update statistics 3522 now := nanotime() 3523 if sched.procresizetime != 0 { 3524 sched.totaltime += int64(old) * (now - sched.procresizetime) 3525 } 3526 sched.procresizetime = now 3527 3528 // initialize new P's 3529 for i := int32(0); i < nprocs; i++ { 3530 pp := allp[i] 3531 if pp == nil { 3532 pp = new(p) 3533 pp.id = i 3534 pp.status = _Pgcstop 3535 pp.sudogcache = pp.sudogbuf[:0] 3536 for i := range pp.deferpool { 3537 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3538 } 3539 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3540 } 3541 if pp.mcache == nil { 3542 if old == 0 && i == 0 { 3543 if getg().m.mcache == nil { 3544 throw("missing mcache?") 3545 } 3546 pp.mcache = getg().m.mcache // bootstrap 3547 } else { 3548 pp.mcache = allocmcache() 3549 } 3550 } 3551 if raceenabled && pp.racectx == 0 { 3552 if old == 0 && i == 0 { 3553 pp.racectx = raceprocctx0 3554 raceprocctx0 = 0 // bootstrap 3555 } else { 3556 pp.racectx = raceproccreate() 3557 } 3558 } 3559 } 3560 3561 // free unused P's 3562 for i := nprocs; i < old; i++ { 3563 p := allp[i] 3564 if trace.enabled { 3565 if p == getg().m.p.ptr() { 3566 // moving to p[0], pretend that we were descheduled 3567 // and then scheduled again to keep the trace sane. 3568 traceGoSched() 3569 traceProcStop(p) 3570 } 3571 } 3572 // move all runnable goroutines to the global queue 3573 for p.runqhead != p.runqtail { 3574 // pop from tail of local queue 3575 p.runqtail-- 3576 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3577 // push onto head of global queue 3578 globrunqputhead(gp) 3579 } 3580 if p.runnext != 0 { 3581 globrunqputhead(p.runnext.ptr()) 3582 p.runnext = 0 3583 } 3584 // if there's a background worker, make it runnable and put 3585 // it on the global queue so it can clean itself up 3586 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3587 casgstatus(gp, _Gwaiting, _Grunnable) 3588 if trace.enabled { 3589 traceGoUnpark(gp, 0) 3590 } 3591 globrunqput(gp) 3592 // This assignment doesn't race because the 3593 // world is stopped. 
3594 p.gcBgMarkWorker.set(nil) 3595 } 3596 for i := range p.sudogbuf { 3597 p.sudogbuf[i] = nil 3598 } 3599 p.sudogcache = p.sudogbuf[:0] 3600 for i := range p.deferpool { 3601 for j := range p.deferpoolbuf[i] { 3602 p.deferpoolbuf[i][j] = nil 3603 } 3604 p.deferpool[i] = p.deferpoolbuf[i][:0] 3605 } 3606 freemcache(p.mcache) 3607 p.mcache = nil 3608 gfpurge(p) 3609 traceProcFree(p) 3610 if raceenabled { 3611 raceprocdestroy(p.racectx) 3612 p.racectx = 0 3613 } 3614 p.status = _Pdead 3615 // can't free P itself because it can be referenced by an M in syscall 3616 } 3617 3618 _g_ := getg() 3619 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3620 // continue to use the current P 3621 _g_.m.p.ptr().status = _Prunning 3622 } else { 3623 // release the current P and acquire allp[0] 3624 if _g_.m.p != 0 { 3625 _g_.m.p.ptr().m = 0 3626 } 3627 _g_.m.p = 0 3628 _g_.m.mcache = nil 3629 p := allp[0] 3630 p.m = 0 3631 p.status = _Pidle 3632 acquirep(p) 3633 if trace.enabled { 3634 traceGoStart() 3635 } 3636 } 3637 var runnablePs *p 3638 for i := nprocs - 1; i >= 0; i-- { 3639 p := allp[i] 3640 if _g_.m.p.ptr() == p { 3641 continue 3642 } 3643 p.status = _Pidle 3644 if runqempty(p) { 3645 pidleput(p) 3646 } else { 3647 p.m.set(mget()) 3648 p.link.set(runnablePs) 3649 runnablePs = p 3650 } 3651 } 3652 stealOrder.reset(uint32(nprocs)) 3653 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3654 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3655 return runnablePs 3656 } 3657 3658 // Associate p and the current m. 3659 // 3660 // This function is allowed to have write barriers even if the caller 3661 // isn't because it immediately acquires _p_. 3662 // 3663 //go:yeswritebarrierrec 3664 func acquirep(_p_ *p) { 3665 // Do the part that isn't allowed to have write barriers. 3666 acquirep1(_p_) 3667 3668 // have p; write barriers now allowed 3669 _g_ := getg() 3670 _g_.m.mcache = _p_.mcache 3671 3672 if trace.enabled { 3673 traceProcStart() 3674 } 3675 } 3676 3677 // acquirep1 is the first step of acquirep, which actually acquires 3678 // _p_. This is broken out so we can disallow write barriers for this 3679 // part, since we don't yet have a P. 3680 // 3681 //go:nowritebarrierrec 3682 func acquirep1(_p_ *p) { 3683 _g_ := getg() 3684 3685 if _g_.m.p != 0 || _g_.m.mcache != nil { 3686 throw("acquirep: already in go") 3687 } 3688 if _p_.m != 0 || _p_.status != _Pidle { 3689 id := int32(0) 3690 if _p_.m != 0 { 3691 id = _p_.m.ptr().id 3692 } 3693 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3694 throw("acquirep: invalid p state") 3695 } 3696 _g_.m.p.set(_p_) 3697 _p_.m.set(_g_.m) 3698 _p_.status = _Prunning 3699 } 3700 3701 // Disassociate p and the current m. 
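// Taken together, acquirep and releasep maintain a simple pairing: while an M
// is running Go code, m.p and p.m point at each other and the P is _Prunning;
// releasep undoes exactly what acquirep set up and hands the P back in _Pidle.
// A condensed sketch of that invariant, mirroring the checks in acquirep1 above
// and releasep below (illustrative only):
//
//	acquirep(pp)
//	// getg().m.p.ptr() == pp, pp.m.ptr() == getg().m,
//	// pp.status == _Prunning, getg().m.mcache == pp.mcache
//	// ... run code that needs a P ...
//	pp2 := releasep()
//	// pp2 == pp, getg().m.p == 0, getg().m.mcache == nil,
//	// pp.m == 0, pp.status == _Pidle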
3702 func releasep() *p { 3703 _g_ := getg() 3704 3705 if _g_.m.p == 0 || _g_.m.mcache == nil { 3706 throw("releasep: invalid arg") 3707 } 3708 _p_ := _g_.m.p.ptr() 3709 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 3710 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 3711 throw("releasep: invalid p state") 3712 } 3713 if trace.enabled { 3714 traceProcStop(_g_.m.p.ptr()) 3715 } 3716 _g_.m.p = 0 3717 _g_.m.mcache = nil 3718 _p_.m = 0 3719 _p_.status = _Pidle 3720 return _p_ 3721 } 3722 3723 func incidlelocked(v int32) { 3724 lock(&sched.lock) 3725 sched.nmidlelocked += v 3726 if v > 0 { 3727 checkdead() 3728 } 3729 unlock(&sched.lock) 3730 } 3731 3732 // Check for deadlock situation. 3733 // The check is based on number of running M's, if 0 -> deadlock. 3734 func checkdead() { 3735 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 3736 // there are no running goroutines. The calling program is 3737 // assumed to be running. 3738 if islibrary || isarchive { 3739 return 3740 } 3741 3742 // If we are dying because of a signal caught on an already idle thread, 3743 // freezetheworld will cause all running threads to block. 3744 // And runtime will essentially enter into deadlock state, 3745 // except that there is a thread that will call exit soon. 3746 if panicking > 0 { 3747 return 3748 } 3749 3750 // -1 for sysmon 3751 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 3752 if run > 0 { 3753 return 3754 } 3755 if run < 0 { 3756 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 3757 throw("checkdead: inconsistent counts") 3758 } 3759 3760 grunning := 0 3761 lock(&allglock) 3762 for i := 0; i < len(allgs); i++ { 3763 gp := allgs[i] 3764 if isSystemGoroutine(gp) { 3765 continue 3766 } 3767 s := readgstatus(gp) 3768 switch s &^ _Gscan { 3769 case _Gwaiting: 3770 grunning++ 3771 case _Grunnable, 3772 _Grunning, 3773 _Gsyscall: 3774 unlock(&allglock) 3775 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 3776 throw("checkdead: runnable g") 3777 } 3778 } 3779 unlock(&allglock) 3780 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 3781 throw("no goroutines (main called runtime.Goexit) - deadlock!") 3782 } 3783 3784 // Maybe jump time forward for playground. 3785 gp := timejump() 3786 if gp != nil { 3787 casgstatus(gp, _Gwaiting, _Grunnable) 3788 globrunqput(gp) 3789 _p_ := pidleget() 3790 if _p_ == nil { 3791 throw("checkdead: no p for timer") 3792 } 3793 mp := mget() 3794 if mp == nil { 3795 // There should always be a free M since 3796 // nothing is running. 3797 throw("checkdead: no m for timer") 3798 } 3799 mp.nextp.set(_p_) 3800 notewakeup(&mp.park) 3801 return 3802 } 3803 3804 getg().m.throwing = -1 // do not dump full stacks 3805 throw("all goroutines are asleep - deadlock!") 3806 } 3807 3808 // forcegcperiod is the maximum time in nanoseconds between garbage 3809 // collections. If we go this long without a garbage collection, one 3810 // is forced to run. 3811 // 3812 // This is a variable for testing purposes. It normally doesn't change. 3813 var forcegcperiod int64 = 2 * 60 * 1e9 3814 3815 // Always runs without a P, so write barriers are not allowed. 
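// sysmon sleeps between iterations with a simple backoff: 20us while it keeps
// finding work, unchanged for the first 50 idle cycles, then doubling per idle
// cycle up to a 10ms cap. A standalone sketch of that schedule (illustrative
// only; the real loop below updates delay in place):
//
//	func sysmonDelay(idle int, delay uint32) uint32 {
//		if idle == 0 { // the previous cycle retook a P or readied work
//			delay = 20
//		} else if idle > 50 { // idle for a while, start backing off
//			delay *= 2
//		}
//		if delay > 10*1000 { // never sleep longer than 10ms
//			delay = 10 * 1000
//		}
//		return delay
//	}
//
// So after roughly 50 idle cycles at 20us the sleep grows to 40us, 80us, ...
// and hits the 10ms cap after nine doublings.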
3816 //
3817 //go:nowritebarrierrec
3818 func sysmon() {
3819 	// If a heap span goes unused for 5 minutes after a garbage collection,
3820 	// we hand it back to the operating system.
3821 	scavengelimit := int64(5 * 60 * 1e9)
3822 
3823 	if debug.scavenge > 0 {
3824 		// Scavenge-a-lot for testing.
3825 		forcegcperiod = 10 * 1e6
3826 		scavengelimit = 20 * 1e6
3827 	}
3828 
3829 	lastscavenge := nanotime()
3830 	nscavenge := 0
3831 
3832 	lasttrace := int64(0)
3833 	idle := 0 // how many cycles in succession we had not woken somebody up
3834 	delay := uint32(0)
3835 	for {
3836 		if idle == 0 { // start with 20us sleep...
3837 			delay = 20
3838 		} else if idle > 50 { // start doubling the sleep after 1ms...
3839 			delay *= 2
3840 		}
3841 		if delay > 10*1000 { // up to 10ms
3842 			delay = 10 * 1000
3843 		}
3844 		usleep(delay)
3845 		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
3846 			lock(&sched.lock)
3847 			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
3848 				atomic.Store(&sched.sysmonwait, 1)
3849 				unlock(&sched.lock)
3850 				// Make wake-up period small enough
3851 				// for the sampling to be correct.
3852 				maxsleep := forcegcperiod / 2
3853 				if scavengelimit < forcegcperiod {
3854 					maxsleep = scavengelimit / 2
3855 				}
3856 				shouldRelax := true
3857 				if osRelaxMinNS > 0 {
3858 					lock(&timers.lock)
3859 					if timers.sleeping {
3860 						now := nanotime()
3861 						next := timers.sleepUntil
3862 						if next-now < osRelaxMinNS {
3863 							shouldRelax = false
3864 						}
3865 					}
3866 					unlock(&timers.lock)
3867 				}
3868 				if shouldRelax {
3869 					osRelax(true)
3870 				}
3871 				notetsleep(&sched.sysmonnote, maxsleep)
3872 				if shouldRelax {
3873 					osRelax(false)
3874 				}
3875 				lock(&sched.lock)
3876 				atomic.Store(&sched.sysmonwait, 0)
3877 				noteclear(&sched.sysmonnote)
3878 				idle = 0
3879 				delay = 20
3880 			}
3881 			unlock(&sched.lock)
3882 		}
3883 		// trigger libc interceptors if needed
3884 		if *cgo_yield != nil {
3885 			asmcgocall(*cgo_yield, nil)
3886 		}
3887 		// poll network if not polled for more than 10ms
3888 		lastpoll := int64(atomic.Load64(&sched.lastpoll))
3889 		now := nanotime()
3890 		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
3891 			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
3892 			gp := netpoll(false) // non-blocking - returns list of goroutines
3893 			if gp != nil {
3894 				// Need to decrement number of idle locked M's
3895 				// (pretending that one more is running) before injectglist.
3896 				// Otherwise it can lead to the following situation:
3897 				// injectglist grabs all P's but before it starts M's to run the P's,
3898 				// another M returns from syscall, finishes running its G,
3899 				// observes that there is no work to do and no other running M's
3900 				// and reports deadlock.
3901 incidlelocked(-1) 3902 injectglist(gp) 3903 incidlelocked(1) 3904 } 3905 } 3906 // retake P's blocked in syscalls 3907 // and preempt long running G's 3908 if retake(now) != 0 { 3909 idle = 0 3910 } else { 3911 idle++ 3912 } 3913 // check if we need to force a GC 3914 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 3915 lock(&forcegc.lock) 3916 forcegc.idle = 0 3917 forcegc.g.schedlink = 0 3918 injectglist(forcegc.g) 3919 unlock(&forcegc.lock) 3920 } 3921 // scavenge heap once in a while 3922 if lastscavenge+scavengelimit/2 < now { 3923 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3924 lastscavenge = now 3925 nscavenge++ 3926 } 3927 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 3928 lasttrace = now 3929 schedtrace(debug.scheddetail > 0) 3930 } 3931 } 3932 } 3933 3934 type sysmontick struct { 3935 schedtick uint32 3936 schedwhen int64 3937 syscalltick uint32 3938 syscallwhen int64 3939 } 3940 3941 // forcePreemptNS is the time slice given to a G before it is 3942 // preempted. 3943 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3944 3945 func retake(now int64) uint32 { 3946 n := 0 3947 for i := int32(0); i < gomaxprocs; i++ { 3948 _p_ := allp[i] 3949 if _p_ == nil { 3950 continue 3951 } 3952 pd := &_p_.sysmontick 3953 s := _p_.status 3954 if s == _Psyscall { 3955 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3956 t := int64(_p_.syscalltick) 3957 if int64(pd.syscalltick) != t { 3958 pd.syscalltick = uint32(t) 3959 pd.syscallwhen = now 3960 continue 3961 } 3962 // On the one hand we don't want to retake Ps if there is no other work to do, 3963 // but on the other hand we want to retake them eventually 3964 // because they can prevent the sysmon thread from deep sleep. 3965 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3966 continue 3967 } 3968 // Need to decrement number of idle locked M's 3969 // (pretending that one more is running) before the CAS. 3970 // Otherwise the M from which we retake can exit the syscall, 3971 // increment nmidle and report deadlock. 3972 incidlelocked(-1) 3973 if atomic.Cas(&_p_.status, s, _Pidle) { 3974 if trace.enabled { 3975 traceGoSysBlock(_p_) 3976 traceProcStop(_p_) 3977 } 3978 n++ 3979 _p_.syscalltick++ 3980 handoffp(_p_) 3981 } 3982 incidlelocked(1) 3983 } else if s == _Prunning { 3984 // Preempt G if it's running for too long. 3985 t := int64(_p_.schedtick) 3986 if int64(pd.schedtick) != t { 3987 pd.schedtick = uint32(t) 3988 pd.schedwhen = now 3989 continue 3990 } 3991 if pd.schedwhen+forcePreemptNS > now { 3992 continue 3993 } 3994 preemptone(_p_) 3995 } 3996 } 3997 return uint32(n) 3998 } 3999 4000 // Tell all goroutines that they have been preempted and they should stop. 4001 // This function is purely best-effort. It can fail to inform a goroutine if a 4002 // processor just started running it. 4003 // No locks need to be held. 4004 // Returns true if preemption request was issued to at least one goroutine. 4005 func preemptall() bool { 4006 res := false 4007 for i := int32(0); i < gomaxprocs; i++ { 4008 _p_ := allp[i] 4009 if _p_ == nil || _p_.status != _Prunning { 4010 continue 4011 } 4012 if preemptone(_p_) { 4013 res = true 4014 } 4015 } 4016 return res 4017 } 4018 4019 // Tell the goroutine running on processor P to stop. 4020 // This function is purely best-effort. It can incorrectly fail to inform the 4021 // goroutine. 
It can inform the wrong goroutine. Even if it informs the
4022 // correct goroutine, that goroutine might ignore the request if it is
4023 // simultaneously executing newstack.
4024 // No lock needs to be held.
4025 // Returns true if preemption request was issued.
4026 // The actual preemption will happen at some point in the future
4027 // and will be indicated by the gp->status no longer being
4028 // Grunning
4029 func preemptone(_p_ *p) bool {
4030 	mp := _p_.m.ptr()
4031 	if mp == nil || mp == getg().m {
4032 		return false
4033 	}
4034 	gp := mp.curg
4035 	if gp == nil || gp == mp.g0 {
4036 		return false
4037 	}
4038 
4039 	gp.preempt = true
4040 
4041 	// Every call in a goroutine checks for stack overflow by
4042 	// comparing the current stack pointer to gp->stackguard0.
4043 	// Setting gp->stackguard0 to StackPreempt folds
4044 	// preemption into the normal stack overflow check.
4045 	gp.stackguard0 = stackPreempt
4046 	return true
4047 }
4048 
4049 var starttime int64
4050 
4051 func schedtrace(detailed bool) {
4052 	now := nanotime()
4053 	if starttime == 0 {
4054 		starttime = now
4055 	}
4056 
4057 	lock(&sched.lock)
4058 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4059 	if detailed {
4060 		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4061 	}
4062 	// We must be careful while reading data from P's, M's and G's.
4063 	// Even if we hold schedlock, most data can be changed concurrently.
4064 	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
4065 	for i := int32(0); i < gomaxprocs; i++ {
4066 		_p_ := allp[i]
4067 		if _p_ == nil {
4068 			continue
4069 		}
4070 		mp := _p_.m.ptr()
4071 		h := atomic.Load(&_p_.runqhead)
4072 		t := atomic.Load(&_p_.runqtail)
4073 		if detailed {
4074 			id := int32(-1)
4075 			if mp != nil {
4076 				id = mp.id
4077 			}
4078 			print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4079 		} else {
4080 			// In non-detailed mode format lengths of per-P run queues as:
4081 			// [len1 len2 len3 len4]
4082 			print(" ")
4083 			if i == 0 {
4084 				print("[")
4085 			}
4086 			print(t - h)
4087 			if i == gomaxprocs-1 {
4088 				print("]\n")
4089 			}
4090 		}
4091 	}
4092 
4093 	if !detailed {
4094 		unlock(&sched.lock)
4095 		return
4096 	}
4097 
4098 	for mp := allm; mp != nil; mp = mp.alllink {
4099 		_p_ := mp.p.ptr()
4100 		gp := mp.curg
4101 		lockedg := mp.lockedg
4102 		id1 := int32(-1)
4103 		if _p_ != nil {
4104 			id1 = _p_.id
4105 		}
4106 		id2 := int64(-1)
4107 		if gp != nil {
4108 			id2 = gp.goid
4109 		}
4110 		id3 := int64(-1)
4111 		if lockedg != nil {
4112 			id3 = lockedg.goid
4113 		}
4114 		print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4115 	}
4116 
4117 	lock(&allglock)
4118 	for gi := 0; gi < len(allgs); gi++ {
4119 		gp := allgs[gi]
4120 		mp := gp.m
4121 		lockedm := gp.lockedm
4122 		id1 := int32(-1)
4123 		if mp != nil {
4124 			id1 = mp.id
4125 		}
4126 		id2 := int32(-1)
4127 		if lockedm != nil {
4128 			id2 = lockedm.id
4129 		}
4130 		print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
4131 	}
4132 	unlock(&allglock)
4133 	unlock(&sched.lock)
4134 }
4135 
4136 // Put mp on midle list.
4137 // Sched must be locked.
4138 // May run during STW, so write barriers are not allowed.
4139 //go:nowritebarrierrec
4140 func mput(mp *m) {
4141 	mp.schedlink = sched.midle
4142 	sched.midle.set(mp)
4143 	sched.nmidle++
4144 	checkdead()
4145 }
4146 
4147 // Try to get an m from midle list.
4148 // Sched must be locked.
4149 // May run during STW, so write barriers are not allowed.
4150 //go:nowritebarrierrec
4151 func mget() *m {
4152 	mp := sched.midle.ptr()
4153 	if mp != nil {
4154 		sched.midle = mp.schedlink
4155 		sched.nmidle--
4156 	}
4157 	return mp
4158 }
4159 
4160 // Put gp on the global runnable queue.
4161 // Sched must be locked.
4162 // May run during STW, so write barriers are not allowed.
4163 //go:nowritebarrierrec
4164 func globrunqput(gp *g) {
4165 	gp.schedlink = 0
4166 	if sched.runqtail != 0 {
4167 		sched.runqtail.ptr().schedlink.set(gp)
4168 	} else {
4169 		sched.runqhead.set(gp)
4170 	}
4171 	sched.runqtail.set(gp)
4172 	sched.runqsize++
4173 }
4174 
4175 // Put gp at the head of the global runnable queue.
4176 // Sched must be locked.
4177 // May run during STW, so write barriers are not allowed.
4178 //go:nowritebarrierrec
4179 func globrunqputhead(gp *g) {
4180 	gp.schedlink = sched.runqhead
4181 	sched.runqhead.set(gp)
4182 	if sched.runqtail == 0 {
4183 		sched.runqtail.set(gp)
4184 	}
4185 	sched.runqsize++
4186 }
4187 
4188 // Put a batch of runnable goroutines on the global runnable queue.
4189 // Sched must be locked.
4190 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4191 	gtail.schedlink = 0
4192 	if sched.runqtail != 0 {
4193 		sched.runqtail.ptr().schedlink.set(ghead)
4194 	} else {
4195 		sched.runqhead.set(ghead)
4196 	}
4197 	sched.runqtail.set(gtail)
4198 	sched.runqsize += n
4199 }
4200 
4201 // Try to get a batch of G's from the global runnable queue.
4202 // Sched must be locked.
4203 func globrunqget(_p_ *p, max int32) *g {
4204 	if sched.runqsize == 0 {
4205 		return nil
4206 	}
4207 
4208 	n := sched.runqsize/gomaxprocs + 1
4209 	if n > sched.runqsize {
4210 		n = sched.runqsize
4211 	}
4212 	if max > 0 && n > max {
4213 		n = max
4214 	}
4215 	if n > int32(len(_p_.runq))/2 {
4216 		n = int32(len(_p_.runq)) / 2
4217 	}
4218 
4219 	sched.runqsize -= n
4220 	if sched.runqsize == 0 {
4221 		sched.runqtail = 0
4222 	}
4223 
4224 	gp := sched.runqhead.ptr()
4225 	sched.runqhead = gp.schedlink
4226 	n--
4227 	for ; n > 0; n-- {
4228 		gp1 := sched.runqhead.ptr()
4229 		sched.runqhead = gp1.schedlink
4230 		runqput(_p_, gp1, false)
4231 	}
4232 	return gp
4233 }
4234 
4235 // Put p on the _Pidle list.
4236 // Sched must be locked.
4237 // May run during STW, so write barriers are not allowed.
4238 //go:nowritebarrierrec
4239 func pidleput(_p_ *p) {
4240 	if !runqempty(_p_) {
4241 		throw("pidleput: P has non-empty run queue")
4242 	}
4243 	_p_.link = sched.pidle
4244 	sched.pidle.set(_p_)
4245 	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
4246 }
4247 
4248 // Try to get a p from the _Pidle list.
4249 // Sched must be locked.
4250 // May run during STW, so write barriers are not allowed.
4251 //go:nowritebarrierrec
4252 func pidleget() *p {
4253 	_p_ := sched.pidle.ptr()
4254 	if _p_ != nil {
4255 		sched.pidle = _p_.link
4256 		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
4257 	}
4258 	return _p_
4259 }
4260 
4261 // runqempty returns true if _p_ has no Gs on its local run queue.
4262 // It never returns true spuriously.
4263 func runqempty(_p_ *p) bool {
4264 	// Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4265 	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4266 	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
4267 	// does not mean the queue is empty.
4268 	for {
4269 		head := atomic.Load(&_p_.runqhead)
4270 		tail := atomic.Load(&_p_.runqtail)
4271 		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4272 		if tail == atomic.Load(&_p_.runqtail) {
4273 			return head == tail && runnext == 0
4274 		}
4275 	}
4276 }
4277 
4278 // To shake out latent assumptions about scheduling order,
4279 // we introduce some randomness into scheduling decisions
4280 // when running with the race detector.
4281 // The need for this was made obvious by changing the
4282 // (deterministic) scheduling order in Go 1.5 and breaking
4283 // many poorly-written tests.
4284 // With the randomness here, as long as the tests pass
4285 // consistently with -race, they shouldn't have latent scheduling
4286 // assumptions.
4287 const randomizeScheduler = raceenabled
4288 
4289 // runqput tries to put g on the local runnable queue.
4290 // If next is false, runqput adds g to the tail of the runnable queue.
4291 // If next is true, runqput puts g in the _p_.runnext slot.
4292 // If the run queue is full, runqput puts g on the global queue.
4293 // Executed only by the owner P.
4294 func runqput(_p_ *p, gp *g, next bool) {
4295 	if randomizeScheduler && next && fastrand()%2 == 0 {
4296 		next = false
4297 	}
4298 
4299 	if next {
4300 	retryNext:
4301 		oldnext := _p_.runnext
4302 		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4303 			goto retryNext
4304 		}
4305 		if oldnext == 0 {
4306 			return
4307 		}
4308 		// Kick the old runnext out to the regular run queue.
4309 		gp = oldnext.ptr()
4310 	}
4311 
4312 retry:
4313 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4314 	t := _p_.runqtail
4315 	if t-h < uint32(len(_p_.runq)) {
4316 		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
4317 		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
4318 		return
4319 	}
4320 	if runqputslow(_p_, gp, h, t) {
4321 		return
4322 	}
4323 	// the queue is not full, now the put above must succeed
4324 	goto retry
4325 }
4326 
4327 // Put g and a batch of work from local runnable queue on global queue.
4328 // Executed only by the owner P.
4329 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4330 	var batch [len(_p_.runq)/2 + 1]*g
4331 
4332 	// First, grab a batch from local queue.
4333 	n := t - h
4334 	n = n / 2
4335 	if n != uint32(len(_p_.runq)/2) {
4336 		throw("runqputslow: queue is not full")
4337 	}
4338 	for i := uint32(0); i < n; i++ {
4339 		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4340 	}
4341 	if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
4342 		return false
4343 	}
4344 	batch[n] = gp
4345 
4346 	if randomizeScheduler {
4347 		for i := uint32(1); i <= n; i++ {
4348 			j := fastrandn(i + 1)
4349 			batch[i], batch[j] = batch[j], batch[i]
4350 		}
4351 	}
4352 
4353 	// Link the goroutines.
4354 	for i := uint32(0); i < n; i++ {
4355 		batch[i].schedlink.set(batch[i+1])
4356 	}
4357 
4358 	// Now put the batch on global queue.
4359 	lock(&sched.lock)
4360 	globrunqputbatch(batch[0], batch[n], int32(n+1))
4361 	unlock(&sched.lock)
4362 	return true
4363 }
4364 
4365 // Get g from local runnable queue.
4366 // If inheritTime is true, gp should inherit the remaining time in the 4367 // current time slice. Otherwise, it should start a new time slice. 4368 // Executed only by the owner P. 4369 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4370 // If there's a runnext, it's the next G to run. 4371 for { 4372 next := _p_.runnext 4373 if next == 0 { 4374 break 4375 } 4376 if _p_.runnext.cas(next, 0) { 4377 return next.ptr(), true 4378 } 4379 } 4380 4381 for { 4382 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4383 t := _p_.runqtail 4384 if t == h { 4385 return nil, false 4386 } 4387 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4388 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4389 return gp, false 4390 } 4391 } 4392 } 4393 4394 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4395 // Batch is a ring buffer starting at batchHead. 4396 // Returns number of grabbed goroutines. 4397 // Can be executed by any P. 4398 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4399 for { 4400 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4401 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4402 n := t - h 4403 n = n - n/2 4404 if n == 0 { 4405 if stealRunNextG { 4406 // Try to steal from _p_.runnext. 4407 if next := _p_.runnext; next != 0 { 4408 // Sleep to ensure that _p_ isn't about to run the g we 4409 // are about to steal. 4410 // The important use case here is when the g running on _p_ 4411 // ready()s another g and then almost immediately blocks. 4412 // Instead of stealing runnext in this window, back off 4413 // to give _p_ a chance to schedule runnext. This will avoid 4414 // thrashing gs between different Ps. 4415 // A sync chan send/recv takes ~50ns as of time of writing, 4416 // so 3us gives ~50x overshoot. 4417 if GOOS != "windows" { 4418 usleep(3) 4419 } else { 4420 // On windows system timer granularity is 1-15ms, 4421 // which is way too much for this optimization. 4422 // So just yield. 4423 osyield() 4424 } 4425 if !_p_.runnext.cas(next, 0) { 4426 continue 4427 } 4428 batch[batchHead%uint32(len(batch))] = next 4429 return 1 4430 } 4431 } 4432 return 0 4433 } 4434 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4435 continue 4436 } 4437 for i := uint32(0); i < n; i++ { 4438 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4439 batch[(batchHead+i)%uint32(len(batch))] = g 4440 } 4441 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4442 return n 4443 } 4444 } 4445 } 4446 4447 // Steal half of elements from local runnable queue of p2 4448 // and put onto local runnable queue of p. 4449 // Returns one of the stolen elements (or nil if failed). 
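// The amount stolen is computed in runqgrab as n - n/2 of the victim's visible
// queue length, i.e. the larger half, rounded up. A tiny worked sketch of that
// arithmetic (illustrative only; stealCount is a made-up name for this example):
//
//	func stealCount(h, t uint32) uint32 {
//		n := t - h // runnable Gs visible in the victim's ring
//		return n - n/2
//	}
//
//	// stealCount(0, 5) == 3, stealCount(0, 4) == 2, stealCount(0, 1) == 1
//
// runqsteal below copies that batch onto the thief's own tail, keeps the last
// stolen G to return directly, and only then publishes the rest by bumping
// runqtail with a store-release.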
4450 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4451 	t := _p_.runqtail
4452 	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4453 	if n == 0 {
4454 		return nil
4455 	}
4456 	n--
4457 	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4458 	if n == 0 {
4459 		return gp
4460 	}
4461 	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
4462 	if t-h+n >= uint32(len(_p_.runq)) {
4463 		throw("runqsteal: runq overflow")
4464 	}
4465 	atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
4466 	return gp
4467 }
4468 
4469 //go:linkname setMaxThreads runtime/debug.setMaxThreads
4470 func setMaxThreads(in int) (out int) {
4471 	lock(&sched.lock)
4472 	out = int(sched.maxmcount)
4473 	if in > 0x7fffffff { // MaxInt32
4474 		sched.maxmcount = 0x7fffffff
4475 	} else {
4476 		sched.maxmcount = int32(in)
4477 	}
4478 	checkmcount()
4479 	unlock(&sched.lock)
4480 	return
4481 }
4482 
4483 func haveexperiment(name string) bool {
4484 	if name == "framepointer" {
4485 		return framepointer_enabled // set by linker
4486 	}
4487 	x := sys.Goexperiment
4488 	for x != "" {
4489 		xname := ""
4490 		i := index(x, ",")
4491 		if i < 0 {
4492 			xname, x = x, ""
4493 		} else {
4494 			xname, x = x[:i], x[i+1:]
4495 		}
4496 		if xname == name {
4497 			return true
4498 		}
4499 		if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
4500 			return false
4501 		}
4502 	}
4503 	return false
4504 }
4505 
4506 //go:nosplit
4507 func procPin() int {
4508 	_g_ := getg()
4509 	mp := _g_.m
4510 
4511 	mp.locks++
4512 	return int(mp.p.ptr().id)
4513 }
4514 
4515 //go:nosplit
4516 func procUnpin() {
4517 	_g_ := getg()
4518 	_g_.m.locks--
4519 }
4520 
4521 //go:linkname sync_runtime_procPin sync.runtime_procPin
4522 //go:nosplit
4523 func sync_runtime_procPin() int {
4524 	return procPin()
4525 }
4526 
4527 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
4528 //go:nosplit
4529 func sync_runtime_procUnpin() {
4530 	procUnpin()
4531 }
4532 
4533 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
4534 //go:nosplit
4535 func sync_atomic_runtime_procPin() int {
4536 	return procPin()
4537 }
4538 
4539 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
4540 //go:nosplit
4541 func sync_atomic_runtime_procUnpin() {
4542 	procUnpin()
4543 }
4544 
4545 // Active spinning for sync.Mutex.
4546 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4547 //go:nosplit
4548 func sync_runtime_canSpin(i int) bool {
4549 	// sync.Mutex is cooperative, so we are conservative with spinning.
4550 	// Spin only a few times and only if running on a multicore machine and
4551 	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4552 	// As opposed to runtime mutex we don't do passive spinning here,
4553 	// because there can be work on global runq or on other Ps.
4554 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4555 		return false
4556 	}
4557 	if p := getg().m.p.ptr(); !runqempty(p) {
4558 		return false
4559 	}
4560 	return true
4561 }
4562 
4563 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4564 //go:nosplit
4565 func sync_runtime_doSpin() {
4566 	procyield(active_spin_cnt)
4567 }
4568 
4569 var stealOrder randomOrder
4570 
4571 // randomOrder/randomEnum are helper types for randomized work stealing.
4572 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4573 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4574 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
4575 type randomOrder struct {
4576 	count    uint32
4577 	coprimes []uint32
4578 }
4579 
4580 type randomEnum struct {
4581 	i     uint32
4582 	count uint32
4583 	pos   uint32
4584 	inc   uint32
4585 }
4586 
4587 func (ord *randomOrder) reset(count uint32) {
4588 	ord.count = count
4589 	ord.coprimes = ord.coprimes[:0]
4590 	for i := uint32(1); i <= count; i++ {
4591 		if gcd(i, count) == 1 {
4592 			ord.coprimes = append(ord.coprimes, i)
4593 		}
4594 	}
4595 }
4596 
4597 func (ord *randomOrder) start(i uint32) randomEnum {
4598 	return randomEnum{
4599 		count: ord.count,
4600 		pos:   i % ord.count,
4601 		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
4602 	}
4603 }
4604 
4605 func (enum *randomEnum) done() bool {
4606 	return enum.i == enum.count
4607 }
4608 
4609 func (enum *randomEnum) next() {
4610 	enum.i++
4611 	enum.pos = (enum.pos + enum.inc) % enum.count
4612 }
4613 
4614 func (enum *randomEnum) position() uint32 {
4615 	return enum.pos
4616 }
4617 
4618 func gcd(a, b uint32) uint32 {
4619 	for b != 0 {
4620 		a, b = b, a%b
4621 	}
4622 	return a
4623 }
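// The enumeration property above can be checked in isolation. The sketch below
// (illustrative only, not used by the scheduler; visitOrder is a made-up name
// for this example) walks count slots with a stride that is coprime to count
// and records each position; because gcd(inc, count) == 1, every index in
// [0, count) appears exactly once.
//
//	func visitOrder(count, start, inc uint32) []uint32 {
//		// Assumes gcd(inc, count) == 1, as guaranteed by randomOrder.reset.
//		order := make([]uint32, 0, count)
//		pos := start % count
//		for i := uint32(0); i < count; i++ {
//			order = append(order, pos)
//			pos = (pos + inc) % count
//		}
//		return order // a permutation of 0..count-1
//	}
//
// For example, with count = 4 the coprimes are {1, 3}, so an enumeration
// started at pos = 2 with inc = 3 visits 2, 1, 0, 3.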