github.com/peggyl/go@v0.0.0-20151008231540-ae315999c2d5/src/runtime/proc1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

var (
	m0 m
	g0 g
)

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx = raceinit()
	}

	sched.maxmcount = 10000

	// Cache the framepointer experiment. This affects stack unwinding.
	framepointer_enabled = haveexperiment("framepointer")

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := int(ncpu)
	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
		if n > _MaxGomaxprocs {
			n = _MaxGomaxprocs
		}
		procs = n
	}
	if procresize(int32(procs)) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}

func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	lock(&sched.lock)
	mp.id = sched.mcount
	sched.mcount++
	checkmcount()
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
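	// A minimal sketch of the lock-free reader this ordering protects
	// (illustrative, not the actual NumCgoCall code):
	//
	//	for mp := (*m)(atomicloadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink {
	//		// every m observed here is fully initialized, because the
	//		// atomicstorep below happens after mcommoninit's writes
	//	}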
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)
}

// Mark gp ready to run.
func ready(gp *g, traceskip int) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, true)
	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	if n > sched.nmidle+1 { // one M is currently running
		n = sched.nmidle + 1
	}
	unlock(&sched.lock)
	return n
}

func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	n -= sched.nmidle + 1 // one M is currently running
	unlock(&sched.lock)
	return n > 0
}

func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomicstore(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomicload(&gp.atomicstatus)
}

// Ownership of gscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = cas(&gp.atomicstatus, oldval, newval)
		}
	case _Gscanenqueue:
		if newval == _Gwaiting {
			success = cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfrom_Gscanstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return cas(&gp.atomicstatus, oldval, newval)
		}
	case _Grunning:
		if newval == _Gscanrunning || newval == _Gscanenqueue {
			return cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
		throw("casgstatus")
	}

	// loop if gp->atomicstatus is in a scan state, giving
	// GC time to finish and change the state to oldval.
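	// The scan bit acts as a lock. Illustrative acquire/release sequence
	// on the scanner's side (see scang and restartg for the real code):
	//
	//	if castogscanstatus(gp, _Gwaiting, _Gscanwaiting) { // acquire
	//		scanstack(gp)
	//		casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting) // release
	//	}
	//
	// Until the release, the cas below keeps failing and we spin.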
	for !cas(&gp.atomicstatus, oldval, newval) {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casgcopystack performs casgstatus(gp, oldstatus, _Gcopystack), assuming oldstatus
// is Gwaiting or Grunnable, and returns the old status. We cannot call casgstatus
// directly, because we are racing with an async wakeup that might come in from netpoll.
// If we see Gwaiting from the readgstatus, it might have become Grunnable by the time
// we get to the cas. If we called casgstatus, it would loop waiting for the status to
// go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.
	// It is important that the scan happens exactly once: if called twice,
	// the installation of stack barriers will detect the double scan and die.

	gp.gcscandone = false

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
	for !gp.gcscandone {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					// Coordinate with traceback
					// in sigprof.
					for !cas(&gp.stackLock, 0, 1) {
						osyield()
					}
					scanstack(gp)
					atomicstore(&gp.stackLock, 0)
					gp.gcscandone = true
				}
				restartg(gp)
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.
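			// Preemption here is cooperative: setting stackguard0 to
			// stackPreempt (below) makes the next function prologue's
			// stack check fail, routing the goroutine into newstack,
			// which notices preemptscan and performs the scan.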

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)

	// Scan is now completed.
	// Goroutine now needs to be made runnable.
	// We put it on the global run queue; ready blocks on the global scheduler lock.
	case _Gscanenqueue:
		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
		if gp != getg().m.curg {
			throw("processing Gscanenqueue on wrong m")
		}
		dropg()
		ready(gp, 0)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first, and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, false)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
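// For example, a caller that needs two consecutive stops under one
// worldsema acquisition could do (an illustrative sketch of the
// contract described above):
//
//	semacquire(&worldsema, false)
//	getg().m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//	systemstack(startTheWorldWithSema)
//	// other P's may run here
//	systemstack(stopTheWorldWithSema)
//	systemstack(startTheWorldWithSema)
//	getg().m.preemptoff = ""
//	semrelease(&worldsema)
//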
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomicstore(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}
	if sched.stopwait != 0 {
		throw("stopTheWorld: not stopped")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.status != _Pgcstop {
			throw("stopTheWorld: not stopped")
		}
	}
}

func mhelpgc() {
	_g_ := getg()
	_g_.m.helpgc = -1
}

func startTheWorldWithSema() {
	_g_ := getg()

	_g_.m.locks++        // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	injectglist(gp)
	add := needaddgcproc()
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Wake up an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// Called to start an M.
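// Runs on the new thread's g0. The stack bounds may not have been set
// up yet (e.g. for threads created by cgo), which is why mstart is
// nosplit and begins by deriving conservative bounds from the current SP.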
//go:nosplit
func mstart() {
	_g_ := getg()

	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * stackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		// Create an extra M for callbacks on threads not created by Go.
		if iscgo && !cgoHasExtraM {
			cgoHasExtraM = true
			newextram()
		}
		initsig()
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		_g_.m.helpgc = 0
		stopm()
	} else if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		if p != _p_ {
			atomicstore(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// It should be called like this:
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}
	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * stackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen.
// Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Cannot throw, because the scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	unlockextra(mp.schedlink.ptr())

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	msigsave(mp)
	// Initialize this thread to use the m.
	asminit()
	minit()
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
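	// Lifecycle of the m allocated below, as seen from a C thread
	// calling into Go (an illustrative summary of needm/dropm above):
	//
	//	needm()        // pop this m from the extra list
	//	cgocallbackg() // run the Go callback on mp.curg
	//	dropm()        // push the m back onto the extra list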
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + _PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as Gidle, change to Gsyscall before adding to allg
	// where GC will see it.
	casgstatus(gp, _Gidle, _Gsyscall)
	gp.m = mp
	mp.curg = gp
	mp.locked = _LockInternal
	mp.lockedg = gp
	gp.lockedm = mp
	gp.goid = int64(xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram))
	}
	// put on allg for garbage collector
	allgadd(gp)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Undo whatever initialization minit did during needm.
	unminit()

	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m
	mnext := lockextra(true)
	mp.schedlink.set(mnext)

	setg(nil)
	unlockextra(mp)
}

var extram uintptr

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
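// The lock is the value of extram itself: 1 (locked) while a thread owns
// the list, otherwise the head pointer. Usage pattern (illustrative; see
// needm and newextram for the real callers):
//
//	mp := lockextra(false)          // spin until we own the list head
//	unlockextra(mp.schedlink.ptr()) // publish new head, releasing the lock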
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	for {
		old := atomicloaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			usleep(1)
			continue
		}
		if casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}

// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)
	mp.nextp.set(_p_)
	msigsave(mp)
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(funcPC(mstart))
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		return
	}
	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	if _g_.m.p != 0 {
		throw("stopm holding p")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}

retry:
	lock(&sched.lock)
	mput(_g_.m)
	unlock(&sched.lock)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {
		gchelper()
		_g_.m.helpgc = 0
		_g_.m.mcache = nil
		_g_.m.p = 0
		goto retry
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

func mspinning() {
	gp := getg()
	if !runqempty(gp.m.nextp.ptr()) {
		// Something (presumably the GC) was readied while the
		// runtime was starting up this M, so the M is no
		// longer spinning.
		if int32(xadd(&sched.nmspinning, -1)) < 0 {
			throw("mspinning: nmspinning underflowed")
		}
	} else {
		gp.m.spinning = true
	}
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			if spinning {
				xadd(&sched.nmspinning, -1)
			}
			return
		}
	}
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		var fn func()
		if spinning {
			fn = mspinning
		}
		newm(fn, _p_)
		return
	}
	if mp.spinning {
		throw("startm: m is spinning")
	}
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	mp.spinning = spinning
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
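// In outline, handoffp decides the P's fate in this order (a summary of
// the body below): local or global work -> start an M; no spinning or
// idle M's -> start a spinning M; GC is waiting -> park the P in
// _Pgcstop; pending safe-point function -> run it; global work appeared
// -> start an M; last running P with the network unpolled -> start an M
// to poll; otherwise the P goes on the idle list.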
//go:nowritebarrier
func handoffp(_p_ *p) {
	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {
		startm(_p_, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
		startm(_p_, true)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting != 0 {
		_p_.status = _Pgcstop
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if _p_.runSafePointFn != 0 && cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// we need to wake up another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	pidleput(_p_)
	unlock(&sched.lock)
}

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
func wakep() {
	// be conservative about spinning threads
	if !cas(&sched.nmspinning, 0, 1) {
		return
	}
	startm(nil, true)
}

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	_g_ := getg()

	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
		throw("stoplockedm: inconsistent locking")
	}
	if _g_.m.p != 0 {
		// Schedule another M to run this p.
		_p_ := releasep()
		handoffp(_p_)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	status := readgstatus(_g_.m.lockedg)
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
		dumpgstatus(_g_)
		throw("stoplockedm: not runnable")
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func startlockedm(gp *g) {
	_g_ := getg()

	mp := gp.lockedm
	if mp == _g_.m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	_p_ := releasep()
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	_g_ := getg()

	if sched.gcwaiting == 0 {
		throw("gcstopm: not waiting for gc")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}
	_p_ := releasep()
	lock(&sched.lock)
	_p_.status = _Pgcstop
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
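// gp must be in _Grunnable; execute CASes it to _Grunning before jumping to it.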
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
func execute(gp *g, inheritTime bool) {
	_g_ := getg()

	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + _StackGuard
	if !inheritTime {
		_g_.m.p.ptr().schedtick++
	}
	_g_.m.curg = gp
	gp.m = _g_.m

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if _g_.m.profilehz != hz {
		resetcpuprofiler(hz)
	}

	if trace.enabled {
		// GoSysExit has to happen when we have a P, but before GoStart.
		// So we emit it here.
		if gp.syscallsp != 0 && gp.sysblocktraced {
			// Since gp.sysblocktraced is true, we must emit an event.
			// There is a race between the code that initializes sysexitseq
			// and sysexitticks (in exitsyscall, which runs without a P,
			// and therefore is not stopped with the rest of the world)
			// and the code that initializes a new trace.
			// The recorded sysexitseq and sysexitticks must therefore
			// be treated as "best effort". If they are valid for this trace,
			// then great, use them for greater accuracy.
			// But if they're not valid for this trace, assume that the
			// trace was started after the actual syscall exit (but before
			// we actually managed to start the goroutine, aka right now),
			// and assign a fresh time stamp to keep the log consistent.
			seq, ts := gp.sysexitseq, gp.sysexitticks
			if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 {
				seq, ts = tracestamp()
			}
			traceGoSysExit(seq, ts)
		}
		traceGoStart()
	}

	gogo(&gp.sched)
}

// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
func findrunnable() (gp *g, inheritTime bool) {
	_g_ := getg()

top:
	if sched.gcwaiting != 0 {
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}
	if fingwait && fingwake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0)
		}
	}

	// local runq
	if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil {
		return gp, inheritTime
	}

	// global runq
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there is a thread blocked in netpoll already.
	// If there is any kind of logical race with that blocked thread
	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
	// this thread will do blocking netpoll below anyway.
	if netpollinited() && sched.lastpoll != 0 {
		if gp := netpoll(false); gp != nil { // non-blocking
			// netpoll returns list of goroutines linked by schedlink.
			injectglist(gp.schedlink.ptr())
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.enabled {
				traceGoUnpark(gp, 0)
			}
			return gp, false
		}
	}

	// If number of spinning M's >= number of busy P's, block.
	// This is necessary to prevent excessive CPU consumption
	// when GOMAXPROCS>>1 but the program parallelism is low.
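	// Concretely, the check below caps spinners at about half the number
	// of busy P's. Worked example (with gomaxprocs=8 and npidle=6, so 2
	// busy P's): the first M sees 2*0 < 2 and starts spinning; a second
	// M sees 2*1 >= 2 and blocks instead.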
	if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
		goto stop
	}
	if !_g_.m.spinning {
		_g_.m.spinning = true
		xadd(&sched.nmspinning, 1)
	}
	// random steal from other P's
	for i := 0; i < int(4*gomaxprocs); i++ {
		if sched.gcwaiting != 0 {
			goto top
		}
		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
		var gp *g
		if _p_ == _g_.m.p.ptr() {
			gp, _ = runqget(_p_)
		} else {
			stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g
			gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG)
		}
		if gp != nil {
			return gp, false
		}
	}

stop:

	// We have nothing to do. If we're in the GC mark phase and can
	// safely scan and blacken objects, run idle-time marking
	// rather than give up the P.
	if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != nil && gcMarkWorkAvailable(_p_) {
		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
		gp := _p_.gcBgMarkWorker
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.enabled {
			traceGoUnpark(gp, 0)
		}
		return gp, false
	}

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		return gp, false
	}
	_p_ := releasep()
	pidleput(_p_)
	unlock(&sched.lock)
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}

	// check all runqueues once again
	for i := 0; i < int(gomaxprocs); i++ {
		_p_ := allp[i]
		if _p_ != nil && !runqempty(_p_) {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				goto top
			}
			break
		}
	}

	// poll network
	if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
		if _g_.m.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if _g_.m.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		gp := netpoll(true) // block until new work is available
		atomicstore64(&sched.lastpoll, uint64(nanotime()))
		if gp != nil {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				injectglist(gp.schedlink.ptr())
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.enabled {
					traceGoUnpark(gp, 0)
				}
				return gp, false
			}
			injectglist(gp)
		}
	}
	stopm()
	goto top
}

func resetspinning() {
	_g_ := getg()

	var nmspinning uint32
	if _g_.m.spinning {
		_g_.m.spinning = false
		nmspinning = xadd(&sched.nmspinning, -1)
		if int32(nmspinning) < 0 {
			throw("findrunnable: negative nmspinning")
		}
	} else {
		nmspinning = atomicload(&sched.nmspinning)
	}

	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
	// so see if we need to wake up another P here.
	if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
		wakep()
	}
}

// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.
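// The list is linked through g.schedlink and every g on it must be in
// _Gwaiting; each is moved to _Grunnable and put on the global run queue,
// and idle P's are then started to consume the new work.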
func injectglist(glist *g) {
	if glist == nil {
		return
	}
	if trace.enabled {
		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
			traceGoUnpark(gp, 0)
		}
	}
	lock(&sched.lock)
	var n int
	for n = 0; glist != nil; n++ {
		gp := glist
		glist = gp.schedlink.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
		globrunqput(gp)
	}
	unlock(&sched.lock)
	for ; n != 0 && sched.npidle != 0; n-- {
		startm(nil, false)
	}
}

// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("schedule: holding locks")
	}

	if _g_.m.lockedg != nil {
		stoplockedm()
		execute(_g_.m.lockedg, false) // Never returns.
	}

top:
	if sched.gcwaiting != 0 {
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}

	var gp *g
	var inheritTime bool
	if trace.enabled || trace.shutdown {
		gp = traceReader()
		if gp != nil {
			casgstatus(gp, _Gwaiting, _Grunnable)
			traceGoUnpark(gp, 0)
			resetspinning()
		}
	}
	if gp == nil && gcBlackenEnabled != 0 {
		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
		if gp != nil {
			resetspinning()
		}
	}
	if gp == nil {
		// Check the global runnable queue once in a while to ensure fairness.
		// Otherwise two goroutines can completely occupy the local runqueue
		// by constantly respawning each other.
		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
			lock(&sched.lock)
			gp = globrunqget(_g_.m.p.ptr(), 1)
			unlock(&sched.lock)
			if gp != nil {
				resetspinning()
			}
		}
	}
	if gp == nil {
		gp, inheritTime = runqget(_g_.m.p.ptr())
		if gp != nil && _g_.m.spinning {
			throw("schedule: spinning with local work")
		}
	}
	if gp == nil {
		gp, inheritTime = findrunnable() // blocks until work is available
		resetspinning()
	}

	if gp.lockedm != nil {
		// Hands off own p to the locked m,
		// then blocks waiting for a new p.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}

// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
	_g_ := getg()

	if _g_.m.lockedg == nil {
		_g_.m.curg.m = nil
		_g_.m.curg = nil
	}
}

func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}

// park continuation on g0.
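// Called via mcall, so getg() here is g0 and gp is the goroutine that is
// parking. If m.waitunlockf is set and returns false, the park is aborted
// and gp is rescheduled immediately.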
func park_m(gp *g) {
	_g_ := getg()

	if trace.enabled {
		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
	}

	casgstatus(gp, _Grunning, _Gwaiting)
	dropg()

	if _g_.m.waitunlockf != nil {
		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
		ok := fn(gp, _g_.m.waitlock)
		_g_.m.waitunlockf = nil
		_g_.m.waitlock = nil
		if !ok {
			if trace.enabled {
				traceGoUnpark(gp, 2)
			}
			casgstatus(gp, _Gwaiting, _Grunnable)
			execute(gp, true) // Schedule it back, never returns.
		}
	}
	schedule()
}

func goschedImpl(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	casgstatus(gp, _Grunning, _Grunnable)
	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	schedule()
}

// Gosched continuation on g0.
func gosched_m(gp *g) {
	if trace.enabled {
		traceGoSched()
	}
	goschedImpl(gp)
}

func gopreempt_m(gp *g) {
	if trace.enabled {
		traceGoPreempt()
	}
	goschedImpl(gp)
}

// Finishes execution of the current goroutine.
func goexit1() {
	if raceenabled {
		racegoend()
	}
	if trace.enabled {
		traceGoEnd()
	}
	mcall(goexit0)
}

// goexit continuation on g0.
func goexit0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Grunning, _Gdead)
	gp.m = nil
	gp.lockedm = nil
	_g_.m.lockedg = nil
	gp.paniconfault = false
	gp._defer = nil // should be nil already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = ""
	gp.param = nil

	dropg()

	if _g_.m.locked&^_LockExternal != 0 {
		print("invalid m->locked = ", _g_.m.locked, "\n")
		throw("internal lockOSThread error")
	}
	_g_.m.locked = 0
	gfput(_g_.m.p.ptr(), gp)
	schedule()
}

//go:nosplit
//go:nowritebarrier
func save(pc, sp uintptr) {
	_g_ := getg()

	_g_.sched.pc = pc
	_g_.sched.sp = sp
	_g_.sched.lr = 0
	_g_.sched.ret = 0
	_g_.sched.ctxt = nil
	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use systemstack
// to run bigger things on the system stack.
//
// reentersyscall is the entry point used by cgo callbacks, where explicitly
// saved SP and PC are restored.
// This is needed when exitsyscall will be called
// from a function further up in the call stack than the parent, as g->syscallsp
// must always point to a valid stack frame. entersyscall below is the normal
// entry point for syscalls, which obtains the SP and PC from the caller.
//
// Syscall tracing:
// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
// If the syscall does not block, that is it; we do not emit any other events.
// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
// we remember the current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards,
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled,
// because tracing can be enabled in the middle of a syscall. We don't want the wait to hang.
//
//go:nosplit
func reentersyscall(pc, sp uintptr) {
	_g_ := getg()

	// Disable preemption because during this function g is in Gsyscall status,
	// but can have inconsistent g->sched, do not let GC observe it.
	_g_.m.locks++

	// Entersyscall must not call any function that might split/grow the stack.
	// (See details in comment above.)
	// Catch calls that might, by replacing the stack guard with something that
	// will trip any stack check and leaving a flag to tell newstack to die.
	_g_.stackguard0 = stackPreempt
	_g_.throwsplit = true

	// Leave SP around for GC and traceback.
	save(pc, sp)
	_g_.syscallsp = sp
	_g_.syscallpc = pc
	casgstatus(_g_, _Grunning, _Gsyscall)
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	if trace.enabled {
		systemstack(traceGoSysCall)
		// systemstack itself clobbers g.sched.{pc,sp} and we might
		// need them later when the G is genuinely blocked in a
		// syscall
		save(pc, sp)
	}

	if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
		systemstack(entersyscall_sysmon)
		save(pc, sp)
	}

	if _g_.m.p.ptr().runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack
		systemstack(runSafePointFn)
		save(pc, sp)
	}

	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
	_g_.sysblocktraced = true
	_g_.m.mcache = nil
	_g_.m.p.ptr().m = 0
	atomicstore(&_g_.m.p.ptr().status, _Psyscall)
	if sched.gcwaiting != 0 {
		systemstack(entersyscall_gcwait)
		save(pc, sp)
	}

	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
	// We set _StackGuard to StackPreempt so that first split stack check calls morestack.
	// Morestack detects this case and throws.
	_g_.stackguard0 = stackPreempt
	_g_.m.locks--
}

// Standard syscall entry used by the go syscall library and normal cgo calls.
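// The dummy argument exists only so that getcallerpc/getcallersp (below)
// have an argument address from which to derive the caller's PC and SP.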
1882 //go:nosplit 1883 func entersyscall(dummy int32) { 1884 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 1885 } 1886 1887 func entersyscall_sysmon() { 1888 lock(&sched.lock) 1889 if atomicload(&sched.sysmonwait) != 0 { 1890 atomicstore(&sched.sysmonwait, 0) 1891 notewakeup(&sched.sysmonnote) 1892 } 1893 unlock(&sched.lock) 1894 } 1895 1896 func entersyscall_gcwait() { 1897 _g_ := getg() 1898 _p_ := _g_.m.p.ptr() 1899 1900 lock(&sched.lock) 1901 if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) { 1902 if trace.enabled { 1903 traceGoSysBlock(_p_) 1904 traceProcStop(_p_) 1905 } 1906 _p_.syscalltick++ 1907 if sched.stopwait--; sched.stopwait == 0 { 1908 notewakeup(&sched.stopnote) 1909 } 1910 } 1911 unlock(&sched.lock) 1912 } 1913 1914 // The same as entersyscall(), but with a hint that the syscall is blocking. 1915 //go:nosplit 1916 func entersyscallblock(dummy int32) { 1917 _g_ := getg() 1918 1919 _g_.m.locks++ // see comment in entersyscall 1920 _g_.throwsplit = true 1921 _g_.stackguard0 = stackPreempt // see comment in entersyscall 1922 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 1923 _g_.sysblocktraced = true 1924 _g_.m.p.ptr().syscalltick++ 1925 1926 // Leave SP around for GC and traceback. 1927 pc := getcallerpc(unsafe.Pointer(&dummy)) 1928 sp := getcallersp(unsafe.Pointer(&dummy)) 1929 save(pc, sp) 1930 _g_.syscallsp = _g_.sched.sp 1931 _g_.syscallpc = _g_.sched.pc 1932 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 1933 sp1 := sp 1934 sp2 := _g_.sched.sp 1935 sp3 := _g_.syscallsp 1936 systemstack(func() { 1937 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 1938 throw("entersyscallblock") 1939 }) 1940 } 1941 casgstatus(_g_, _Grunning, _Gsyscall) 1942 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 1943 systemstack(func() { 1944 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 1945 throw("entersyscallblock") 1946 }) 1947 } 1948 1949 systemstack(entersyscallblock_handoff) 1950 1951 // Resave for traceback during blocked call. 1952 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 1953 1954 _g_.m.locks-- 1955 } 1956 1957 func entersyscallblock_handoff() { 1958 if trace.enabled { 1959 traceGoSysCall() 1960 traceGoSysBlock(getg().m.p.ptr()) 1961 } 1962 handoffp(releasep()) 1963 } 1964 1965 // The goroutine g exited its system call. 1966 // Arrange for it to run on a cpu again. 1967 // This is called only from the go syscall library, not 1968 // from the low-level system calls used by the runtime. 1969 //go:nosplit 1970 func exitsyscall(dummy int32) { 1971 _g_ := getg() 1972 1973 _g_.m.locks++ // see comment in entersyscall 1974 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 1975 throw("exitsyscall: syscall frame is no longer valid") 1976 } 1977 1978 _g_.waitsince = 0 1979 oldp := _g_.m.p.ptr() 1980 if exitsyscallfast() { 1981 if _g_.m.mcache == nil { 1982 throw("lost mcache") 1983 } 1984 if trace.enabled { 1985 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 1986 systemstack(traceGoStart) 1987 } 1988 } 1989 // There's a cpu for us, so we can run. 1990 _g_.m.p.ptr().syscalltick++ 1991 // We need to cas the status and scan before resuming...
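// (Added note: casgstatus loops while the _Gscan bit is set, so if the GC is
// scanning this goroutine's stack, the transition below also waits for that
// scan to finish before the goroutine resumes running.)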
1992 casgstatus(_g_, _Gsyscall, _Grunning) 1993 1994 // Garbage collector isn't running (since we are), 1995 // so okay to clear syscallsp. 1996 _g_.syscallsp = 0 1997 _g_.m.locks-- 1998 if _g_.preempt { 1999 // restore the preemption request in case we've cleared it in newstack 2000 _g_.stackguard0 = stackPreempt 2001 } else { 2002 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2003 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2004 } 2005 _g_.throwsplit = false 2006 return 2007 } 2008 2009 _g_.sysexitticks = 0 2010 _g_.sysexitseq = 0 2011 if trace.enabled { 2012 // Wait till traceGoSysBlock event is emitted. 2013 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2014 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2015 osyield() 2016 } 2017 // We can't trace syscall exit right now because we don't have a P. 2018 // Tracing code can invoke write barriers that cannot run without a P. 2019 // So instead we remember the syscall exit time and emit the event 2020 // in execute when we have a P. 2021 _g_.sysexitseq, _g_.sysexitticks = tracestamp() 2022 } 2023 2024 _g_.m.locks-- 2025 2026 // Call the scheduler. 2027 mcall(exitsyscall0) 2028 2029 if _g_.m.mcache == nil { 2030 throw("lost mcache") 2031 } 2032 2033 // Scheduler returned, so we're allowed to run now. 2034 // Delete the syscallsp information that we left for 2035 // the garbage collector during the system call. 2036 // Must wait until now because until gosched returns 2037 // we don't know for sure that the garbage collector 2038 // is not running. 2039 _g_.syscallsp = 0 2040 _g_.m.p.ptr().syscalltick++ 2041 _g_.throwsplit = false 2042 } 2043 2044 //go:nosplit 2045 func exitsyscallfast() bool { 2046 _g_ := getg() 2047 2048 // Freezetheworld sets stopwait but does not retake P's. 2049 if sched.stopwait == freezeStopWait { 2050 _g_.m.mcache = nil 2051 _g_.m.p = 0 2052 return false 2053 } 2054 2055 // Try to re-acquire the last P. 2056 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2057 // There's a cpu for us, so we can run. 2058 _g_.m.mcache = _g_.m.p.ptr().mcache 2059 _g_.m.p.ptr().m.set(_g_.m) 2060 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2061 if trace.enabled { 2062 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2063 // traceGoSysBlock for this syscall was already emitted, 2064 // but here we effectively retake the p from the new syscall running on the same p. 2065 systemstack(func() { 2066 // Denote blocking of the new syscall. 2067 traceGoSysBlock(_g_.m.p.ptr()) 2068 // Denote completion of the current syscall. 2069 traceGoSysExit(tracestamp()) 2070 }) 2071 } 2072 _g_.m.p.ptr().syscalltick++ 2073 } 2074 return true 2075 } 2076 2077 // Try to get any other idle P. 2078 oldp := _g_.m.p.ptr() 2079 _g_.m.mcache = nil 2080 _g_.m.p = 0 2081 if sched.pidle != 0 { 2082 var ok bool 2083 systemstack(func() { 2084 ok = exitsyscallfast_pidle() 2085 if ok && trace.enabled { 2086 if oldp != nil { 2087 // Wait till traceGoSysBlock event is emitted. 2088 // This ensures consistency of the trace (the goroutine is started after it is blocked). 
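// (Added note: p.syscalltick is incremented by whoever emits traceGoSysBlock,
// as described in the tracing comment above reentersyscall, so this wait is
// guaranteed to terminate.)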
2089 for oldp.syscalltick == _g_.m.syscalltick { 2090 osyield() 2091 } 2092 } 2093 traceGoSysExit(tracestamp()) 2094 } 2095 }) 2096 if ok { 2097 return true 2098 } 2099 } 2100 return false 2101 } 2102 2103 func exitsyscallfast_pidle() bool { 2104 lock(&sched.lock) 2105 _p_ := pidleget() 2106 if _p_ != nil && atomicload(&sched.sysmonwait) != 0 { 2107 atomicstore(&sched.sysmonwait, 0) 2108 notewakeup(&sched.sysmonnote) 2109 } 2110 unlock(&sched.lock) 2111 if _p_ != nil { 2112 acquirep(_p_) 2113 return true 2114 } 2115 return false 2116 } 2117 2118 // exitsyscall slow path on g0. 2119 // Failed to acquire P, enqueue gp as runnable. 2120 func exitsyscall0(gp *g) { 2121 _g_ := getg() 2122 2123 casgstatus(gp, _Gsyscall, _Grunnable) 2124 dropg() 2125 lock(&sched.lock) 2126 _p_ := pidleget() 2127 if _p_ == nil { 2128 globrunqput(gp) 2129 } else if atomicload(&sched.sysmonwait) != 0 { 2130 atomicstore(&sched.sysmonwait, 0) 2131 notewakeup(&sched.sysmonnote) 2132 } 2133 unlock(&sched.lock) 2134 if _p_ != nil { 2135 acquirep(_p_) 2136 execute(gp, false) // Never returns. 2137 } 2138 if _g_.m.lockedg != nil { 2139 // Wait until another thread schedules gp and so m again. 2140 stoplockedm() 2141 execute(gp, false) // Never returns. 2142 } 2143 stopm() 2144 schedule() // Never returns. 2145 } 2146 2147 func beforefork() { 2148 gp := getg().m.curg 2149 2150 // Fork can hang if preempted with signals frequently enough (see issue 5517). 2151 // Ensure that we stay on the same M where we disable profiling. 2152 gp.m.locks++ 2153 if gp.m.profilehz != 0 { 2154 resetcpuprofiler(0) 2155 } 2156 2157 // This function is called before fork in syscall package. 2158 // Code between fork and exec must not allocate memory nor even try to grow stack. 2159 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2160 // runtime_AfterFork will undo this in parent process, but not in child. 2161 gp.stackguard0 = stackFork 2162 } 2163 2164 // Called from syscall package before fork. 2165 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2166 //go:nosplit 2167 func syscall_runtime_BeforeFork() { 2168 systemstack(beforefork) 2169 } 2170 2171 func afterfork() { 2172 gp := getg().m.curg 2173 2174 // See the comment in beforefork. 2175 gp.stackguard0 = gp.stack.lo + _StackGuard 2176 2177 hz := sched.profilehz 2178 if hz != 0 { 2179 resetcpuprofiler(hz) 2180 } 2181 gp.m.locks-- 2182 } 2183 2184 // Called from syscall package after fork in parent. 2185 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2186 //go:nosplit 2187 func syscall_runtime_AfterFork() { 2188 systemstack(afterfork) 2189 } 2190 2191 // Allocate a new g, with a stack big enough for stacksize bytes. 2192 func malg(stacksize int32) *g { 2193 newg := new(g) 2194 if stacksize >= 0 { 2195 stacksize = round2(_StackSystem + stacksize) 2196 systemstack(func() { 2197 newg.stack, newg.stkbar = stackalloc(uint32(stacksize)) 2198 }) 2199 newg.stackguard0 = newg.stack.lo + _StackGuard 2200 newg.stackguard1 = ^uintptr(0) 2201 newg.stackAlloc = uintptr(stacksize) 2202 } 2203 return newg 2204 } 2205 2206 // Create a new g running fn with siz bytes of arguments. 2207 // Put it on the queue of g's waiting to run. 2208 // The compiler turns a go statement into a call to this. 2209 // Cannot split the stack because it assumes that the arguments 2210 // are available sequentially after &fn; they would not be 2211 // copied if a stack split occurred. 
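// An illustrative sketch (not literal compiler output): for
//	func add(x, y int64)
// the statement
//	go add(1, 2)
// is lowered to roughly
//	newproc(16, add·f, 1, 2)
// with the 16 argument bytes laid out immediately after &fn on the caller's stack.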
2212 //go:nosplit 2213 func newproc(siz int32, fn *funcval) { 2214 argp := add(unsafe.Pointer(&fn), ptrSize) 2215 pc := getcallerpc(unsafe.Pointer(&siz)) 2216 systemstack(func() { 2217 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2218 }) 2219 } 2220 2221 // Create a new g running fn with narg bytes of arguments starting 2222 // at argp and returning nret bytes of results. callerpc is the 2223 // address of the go statement that created this. The new g is put 2224 // on the queue of g's waiting to run. 2225 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2226 _g_ := getg() 2227 2228 if fn == nil { 2229 _g_.m.throwing = -1 // do not dump full stacks 2230 throw("go of nil func value") 2231 } 2232 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2233 siz := narg + nret 2234 siz = (siz + 7) &^ 7 2235 2236 // We could allocate a larger initial stack if necessary. 2237 // Not worth it: this is almost always an error. 2238 // 4*sizeof(uintreg): extra space added below 2239 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2240 if siz >= _StackMin-4*regSize-regSize { 2241 throw("newproc: function arguments too large for new goroutine") 2242 } 2243 2244 _p_ := _g_.m.p.ptr() 2245 newg := gfget(_p_) 2246 if newg == nil { 2247 newg = malg(_StackMin) 2248 casgstatus(newg, _Gidle, _Gdead) 2249 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2250 } 2251 if newg.stack.hi == 0 { 2252 throw("newproc1: newg missing stack") 2253 } 2254 2255 if readgstatus(newg) != _Gdead { 2256 throw("newproc1: new g is not Gdead") 2257 } 2258 2259 totalSize := 4*regSize + uintptr(siz) // extra space in case of reads slightly beyond frame 2260 if hasLinkRegister { 2261 totalSize += ptrSize 2262 } 2263 totalSize += -totalSize & (spAlign - 1) // align to spAlign 2264 sp := newg.stack.hi - totalSize 2265 spArg := sp 2266 if hasLinkRegister { 2267 // caller's LR 2268 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil 2269 spArg += ptrSize 2270 } 2271 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2272 2273 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2274 newg.sched.sp = sp 2275 newg.stktopsp = sp 2276 newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function 2277 newg.sched.g = guintptr(unsafe.Pointer(newg)) 2278 gostartcallfn(&newg.sched, fn) 2279 newg.gopc = callerpc 2280 newg.startpc = fn.fn 2281 casgstatus(newg, _Gdead, _Grunnable) 2282 2283 if _p_.goidcache == _p_.goidcacheend { 2284 // Sched.goidgen is the last allocated id, 2285 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 2286 // At startup sched.goidgen=0, so main goroutine receives goid=1. 
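// Worked example of the first refill (added commentary): xadd64 returns
// 0+16 = 16, so goidcache becomes 16 - (16-1) = 1 and goidcacheend becomes
// 1+16 = 17; this P then hands out goids 1 through 16 before refilling,
// and the main goroutine gets goid 1.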
2287 _p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch) 2288 _p_.goidcache -= _GoidCacheBatch - 1 2289 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 2290 } 2291 newg.goid = int64(_p_.goidcache) 2292 _p_.goidcache++ 2293 if raceenabled { 2294 newg.racectx = racegostart(callerpc) 2295 } 2296 if trace.enabled { 2297 traceGoCreate(newg, newg.startpc) 2298 } 2299 runqput(_p_, newg, true) 2300 2301 if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic 2302 wakep() 2303 } 2304 _g_.m.locks-- 2305 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 2306 _g_.stackguard0 = stackPreempt 2307 } 2308 return newg 2309 } 2310 2311 // Put on gfree list. 2312 // If local list is too long, transfer a batch to the global list. 2313 func gfput(_p_ *p, gp *g) { 2314 if readgstatus(gp) != _Gdead { 2315 throw("gfput: bad status (not Gdead)") 2316 } 2317 2318 stksize := gp.stackAlloc 2319 2320 if stksize != _FixedStack { 2321 // non-standard stack size - free it. 2322 stackfree(gp.stack, gp.stackAlloc) 2323 gp.stack.lo = 0 2324 gp.stack.hi = 0 2325 gp.stackguard0 = 0 2326 gp.stkbar = nil 2327 gp.stkbarPos = 0 2328 } else { 2329 // Reset stack barriers. 2330 gp.stkbar = gp.stkbar[:0] 2331 gp.stkbarPos = 0 2332 } 2333 2334 gp.schedlink.set(_p_.gfree) 2335 _p_.gfree = gp 2336 _p_.gfreecnt++ 2337 if _p_.gfreecnt >= 64 { 2338 lock(&sched.gflock) 2339 for _p_.gfreecnt >= 32 { 2340 _p_.gfreecnt-- 2341 gp = _p_.gfree 2342 _p_.gfree = gp.schedlink.ptr() 2343 gp.schedlink.set(sched.gfree) 2344 sched.gfree = gp 2345 sched.ngfree++ 2346 } 2347 unlock(&sched.gflock) 2348 } 2349 } 2350 2351 // Get from gfree list. 2352 // If local list is empty, grab a batch from global list. 2353 func gfget(_p_ *p) *g { 2354 retry: 2355 gp := _p_.gfree 2356 if gp == nil && sched.gfree != nil { 2357 lock(&sched.gflock) 2358 for _p_.gfreecnt < 32 && sched.gfree != nil { 2359 _p_.gfreecnt++ 2360 gp = sched.gfree 2361 sched.gfree = gp.schedlink.ptr() 2362 sched.ngfree-- 2363 gp.schedlink.set(_p_.gfree) 2364 _p_.gfree = gp 2365 } 2366 unlock(&sched.gflock) 2367 goto retry 2368 } 2369 if gp != nil { 2370 _p_.gfree = gp.schedlink.ptr() 2371 _p_.gfreecnt-- 2372 if gp.stack.lo == 0 { 2373 // Stack was deallocated in gfput. Allocate a new one. 2374 systemstack(func() { 2375 gp.stack, gp.stkbar = stackalloc(_FixedStack) 2376 }) 2377 gp.stackguard0 = gp.stack.lo + _StackGuard 2378 gp.stackAlloc = _FixedStack 2379 } else { 2380 if raceenabled { 2381 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2382 } 2383 } 2384 } 2385 return gp 2386 } 2387 2388 // Purge all cached G's from gfree list to the global list. 2389 func gfpurge(_p_ *p) { 2390 lock(&sched.gflock) 2391 for _p_.gfreecnt != 0 { 2392 _p_.gfreecnt-- 2393 gp := _p_.gfree 2394 _p_.gfree = gp.schedlink.ptr() 2395 gp.schedlink.set(sched.gfree) 2396 sched.gfree = gp 2397 sched.ngfree++ 2398 } 2399 unlock(&sched.gflock) 2400 } 2401 2402 // Breakpoint executes a breakpoint trap. 2403 func Breakpoint() { 2404 breakpoint() 2405 } 2406 2407 // dolockOSThread is called by LockOSThread and lockOSThread below 2408 // after they modify m.locked. Do not allow preemption during this call, 2409 // or else the m might be different in this function than in the caller. 
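// A typical use of the exported LockOSThread/UnlockOSThread pair below
// (a hedged sketch, not code from this package), e.g. for a C library that
// keeps state in thread-local storage:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... calls that must all run on this OS thread ...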
2410 //go:nosplit 2411 func dolockOSThread() { 2412 _g_ := getg() 2413 _g_.m.lockedg = _g_ 2414 _g_.lockedm = _g_.m 2415 } 2416 2417 //go:nosplit 2418 2419 // LockOSThread wires the calling goroutine to its current operating system thread. 2420 // Until the calling goroutine exits or calls UnlockOSThread, it will always 2421 // execute in that thread, and no other goroutine can. 2422 func LockOSThread() { 2423 getg().m.locked |= _LockExternal 2424 dolockOSThread() 2425 } 2426 2427 //go:nosplit 2428 func lockOSThread() { 2429 getg().m.locked += _LockInternal 2430 dolockOSThread() 2431 } 2432 2433 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 2434 // after they update m->locked. Do not allow preemption during this call, 2435 // or else the m might be different in this function than in the caller. 2436 //go:nosplit 2437 func dounlockOSThread() { 2438 _g_ := getg() 2439 if _g_.m.locked != 0 { 2440 return 2441 } 2442 _g_.m.lockedg = nil 2443 _g_.lockedm = nil 2444 } 2445 2446 //go:nosplit 2447 2448 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 2449 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 2450 func UnlockOSThread() { 2451 getg().m.locked &^= _LockExternal 2452 dounlockOSThread() 2453 } 2454 2455 //go:nosplit 2456 func unlockOSThread() { 2457 _g_ := getg() 2458 if _g_.m.locked < _LockInternal { 2459 systemstack(badunlockosthread) 2460 } 2461 _g_.m.locked -= _LockInternal 2462 dounlockOSThread() 2463 } 2464 2465 func badunlockosthread() { 2466 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 2467 } 2468 2469 func gcount() int32 { 2470 n := int32(allglen) - sched.ngfree 2471 for i := 0; ; i++ { 2472 _p_ := allp[i] 2473 if _p_ == nil { 2474 break 2475 } 2476 n -= _p_.gfreecnt 2477 } 2478 2479 // All these variables can be changed concurrently, so the result can be inconsistent. 2480 // But at least the current goroutine is running. 2481 if n < 1 { 2482 n = 1 2483 } 2484 return n 2485 } 2486 2487 func mcount() int32 { 2488 return sched.mcount 2489 } 2490 2491 var prof struct { 2492 lock uint32 2493 hz int32 2494 } 2495 2496 func _System() { _System() } 2497 func _ExternalCode() { _ExternalCode() } 2498 func _GC() { _GC() } 2499 2500 // Called if we receive a SIGPROF signal. 2501 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 2502 if prof.hz == 0 { 2503 return 2504 } 2505 2506 // Profiling runs concurrently with GC, so it must not allocate. 2507 mp.mallocing++ 2508 2509 // Coordinate with stack barrier insertion in scanstack. 2510 for !cas(&gp.stackLock, 0, 1) { 2511 osyield() 2512 } 2513 2514 // Define that a "user g" is a user-created goroutine, and a "system g" 2515 // is one that is m->g0 or m->gsignal. 2516 // 2517 // We might be interrupted for profiling halfway through a 2518 // goroutine switch. The switch involves updating three (or four) values: 2519 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 2520 // because once it gets updated the new g is running. 2521 // 2522 // When switching from a user g to a system g, LR is not considered live, 2523 // so the update only affects g, SP, and PC. Since PC must be last, 2524 // the possible partial transitions in ordinary execution are (1) g alone is updated, 2525 // (2) both g and SP are updated, and (3) SP alone is updated. 2526 // If SP or g alone is updated, we can detect the partial transition by checking 2527 // whether the SP is within g's stack bounds.
(We could also require that SP 2528 // be changed only after g, but the stack bounds check is needed by other 2529 // cases, so there is no need to impose an additional requirement.) 2530 // 2531 // There is one exceptional transition to a system g, not in ordinary execution. 2532 // When a signal arrives, the operating system starts the signal handler running 2533 // with an updated PC and SP. The g is updated last, at the beginning of the 2534 // handler. There are two reasons this is okay. First, until g is updated the 2535 // g and SP do not match, so the stack bounds check detects the partial transition. 2536 // Second, signal handlers currently run with signals disabled, so a profiling 2537 // signal cannot arrive during the handler. 2538 // 2539 // When switching from a system g to a user g, there are three possibilities. 2540 // 2541 // First, it may be that the g switch has no PC update, because the SP 2542 // either corresponds to a user g throughout (as in asmcgocall) 2543 // or because it has been arranged to look like a user g frame 2544 // (as in cgocallback_gofunc). In this case, since the entire 2545 // transition is a g+SP update, a partial transition updating just one of 2546 // those will be detected by the stack bounds check. 2547 // 2548 // Second, when returning from a signal handler, the PC and SP updates 2549 // are performed by the operating system in an atomic update, so the g 2550 // update must be done before them. The stack bounds check detects 2551 // the partial transition here, and (again) signal handlers run with signals 2552 // disabled, so a profiling signal cannot arrive then anyway. 2553 // 2554 // Third, the common case: it may be that the switch updates g, SP, and PC 2555 // separately. If the PC is within any of the functions that do this, 2556 // we don't ask for a traceback. See the function setsSP for more about this. 2557 // 2558 // There is another apparently viable approach, recorded here in case 2559 // the "PC within setsSP function" check turns out not to be usable. 2560 // It would be possible to delay the update of either g or SP until immediately 2561 // before the PC update instruction. Then, because of the stack bounds check, 2562 // the only problematic interrupt point is just before that PC update instruction, 2563 // and the sigprof handler can detect that instruction and simulate stepping past 2564 // it in order to reach a consistent state. On ARM, the update of g must be made 2565 // in two places (in R10 and also in a TLS slot), so the delayed update would 2566 // need to be the SP update. The sigprof handler must read the instruction at 2567 // the current PC and if it was the known instruction (for example, JMP BX or 2568 // MOV R2, PC), use that other register in place of the PC value. 2569 // The biggest drawback to this solution is that it requires that we can tell 2570 // whether it's safe to read from the memory pointed at by PC. 2571 // In a correct program, we can test PC == nil and otherwise read, 2572 // but if a profiling signal happens at the instant that a program executes 2573 // a bad jump (before the program manages to handle the resulting fault) 2574 // the profiling handler could fault trying to read nonexistent memory. 2575 // 2576 // To recap, there are no constraints on the assembly being used for the 2577 // transition. We simply require that g and SP match and that the PC is not 2578 // in gogo.
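// (Added summary: the checks below implement exactly that requirement. The
// traceback is refused when gp is nil, when SP falls outside gp's stack
// bounds, or when the PC sits inside one of the SP-setting functions that
// setsSP recognizes.)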
2579 traceback := true 2580 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 2581 traceback = false 2582 } 2583 var stk [maxCPUProfStack]uintptr 2584 n := 0 2585 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 2586 // Cgo, we can't unwind and symbolize arbitrary C code, 2587 // so instead collect Go stack that leads to the cgo call. 2588 // This is especially important on windows, since all syscalls are cgo calls. 2589 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0) 2590 } else if traceback { 2591 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 2592 } 2593 if !traceback || n <= 0 { 2594 // Normal traceback is impossible or has failed. 2595 // See if it falls into several common cases. 2596 n = 0 2597 if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 2598 // Libcall, i.e. runtime syscall on windows. 2599 // Collect Go stack that leads to the call. 2600 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 2601 } 2602 if n == 0 { 2603 // If all of the above has failed, account it against abstract "System" or "GC". 2604 n = 2 2605 // "ExternalCode" is better than "etext". 2606 if pc > firstmoduledata.etext { 2607 pc = funcPC(_ExternalCode) + _PCQuantum 2608 } 2609 stk[0] = pc 2610 if mp.preemptoff != "" || mp.helpgc != 0 { 2611 stk[1] = funcPC(_GC) + _PCQuantum 2612 } else { 2613 stk[1] = funcPC(_System) + _PCQuantum 2614 } 2615 } 2616 } 2617 atomicstore(&gp.stackLock, 0) 2618 2619 if prof.hz != 0 { 2620 // Simple cas-lock to coordinate with setcpuprofilerate. 2621 for !cas(&prof.lock, 0, 1) { 2622 osyield() 2623 } 2624 if prof.hz != 0 { 2625 cpuprof.add(stk[:n]) 2626 } 2627 atomicstore(&prof.lock, 0) 2628 } 2629 mp.mallocing-- 2630 } 2631 2632 // Reports whether a function will set the SP 2633 // to an absolute value. It is important that 2634 // we don't traceback when these are at the bottom 2635 // of the stack since we can't be sure that we will 2636 // find the caller. 2637 // 2638 // If the function is not on the bottom of the stack 2639 // we assume that it will have set it up so that traceback will be consistent, 2640 // either by being a traceback terminating function 2641 // or putting one on the stack at the right offset. 2642 func setsSP(pc uintptr) bool { 2643 f := findfunc(pc) 2644 if f == nil { 2645 // couldn't find the function for this PC, 2646 // so assume the worst and stop traceback 2647 return true 2648 } 2649 switch f.entry { 2650 case gogoPC, systemstackPC, mcallPC, morestackPC: 2651 return true 2652 } 2653 return false 2654 } 2655 2656 // Arrange to call fn with a traceback hz times a second. 2657 func setcpuprofilerate_m(hz int32) { 2658 // Force sane arguments. 2659 if hz < 0 { 2660 hz = 0 2661 } 2662 2663 // Disable preemption, otherwise we can be rescheduled to another thread 2664 // that has profiling enabled. 2665 _g_ := getg() 2666 _g_.m.locks++ 2667 2668 // Stop profiler on this thread so that it is safe to lock prof. 2669 // If a profiling signal came in while we had prof locked, 2670 // it would deadlock.
2671 resetcpuprofiler(0) 2672 2673 for !cas(&prof.lock, 0, 1) { 2674 osyield() 2675 } 2676 prof.hz = hz 2677 atomicstore(&prof.lock, 0) 2678 2679 lock(&sched.lock) 2680 sched.profilehz = hz 2681 unlock(&sched.lock) 2682 2683 if hz != 0 { 2684 resetcpuprofiler(hz) 2685 } 2686 2687 _g_.m.locks-- 2688 } 2689 2690 // Change number of processors. The world is stopped, sched is locked. 2691 // gcworkbufs are not being modified by either the GC or 2692 // the write barrier code. 2693 // Returns list of Ps with local work, they need to be scheduled by the caller. 2694 func procresize(nprocs int32) *p { 2695 old := gomaxprocs 2696 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 2697 throw("procresize: invalid arg") 2698 } 2699 if trace.enabled { 2700 traceGomaxprocs(nprocs) 2701 } 2702 2703 // update statistics 2704 now := nanotime() 2705 if sched.procresizetime != 0 { 2706 sched.totaltime += int64(old) * (now - sched.procresizetime) 2707 } 2708 sched.procresizetime = now 2709 2710 // initialize new P's 2711 for i := int32(0); i < nprocs; i++ { 2712 pp := allp[i] 2713 if pp == nil { 2714 pp = new(p) 2715 pp.id = i 2716 pp.status = _Pgcstop 2717 pp.sudogcache = pp.sudogbuf[:0] 2718 for i := range pp.deferpool { 2719 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 2720 } 2721 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 2722 } 2723 if pp.mcache == nil { 2724 if old == 0 && i == 0 { 2725 if getg().m.mcache == nil { 2726 throw("missing mcache?") 2727 } 2728 pp.mcache = getg().m.mcache // bootstrap 2729 } else { 2730 pp.mcache = allocmcache() 2731 } 2732 } 2733 } 2734 2735 // free unused P's 2736 for i := nprocs; i < old; i++ { 2737 p := allp[i] 2738 if trace.enabled { 2739 if p == getg().m.p.ptr() { 2740 // moving to p[0], pretend that we were descheduled 2741 // and then scheduled again to keep the trace sane. 
2742 traceGoSched() 2743 traceProcStop(p) 2744 } 2745 } 2746 // move all runnable goroutines to the global queue 2747 for p.runqhead != p.runqtail { 2748 // pop from tail of local queue 2749 p.runqtail-- 2750 gp := p.runq[p.runqtail%uint32(len(p.runq))] 2751 // push onto head of global queue 2752 globrunqputhead(gp) 2753 } 2754 if p.runnext != 0 { 2755 globrunqputhead(p.runnext.ptr()) 2756 p.runnext = 0 2757 } 2758 // if there's a background worker, make it runnable and put 2759 // it on the global queue so it can clean itself up 2760 if p.gcBgMarkWorker != nil { 2761 casgstatus(p.gcBgMarkWorker, _Gwaiting, _Grunnable) 2762 if trace.enabled { 2763 traceGoUnpark(p.gcBgMarkWorker, 0) 2764 } 2765 globrunqput(p.gcBgMarkWorker) 2766 p.gcBgMarkWorker = nil 2767 } 2768 for i := range p.sudogbuf { 2769 p.sudogbuf[i] = nil 2770 } 2771 p.sudogcache = p.sudogbuf[:0] 2772 for i := range p.deferpool { 2773 for j := range p.deferpoolbuf[i] { 2774 p.deferpoolbuf[i][j] = nil 2775 } 2776 p.deferpool[i] = p.deferpoolbuf[i][:0] 2777 } 2778 freemcache(p.mcache) 2779 p.mcache = nil 2780 gfpurge(p) 2781 traceProcFree(p) 2782 p.status = _Pdead 2783 // can't free P itself because it can be referenced by an M in syscall 2784 } 2785 2786 _g_ := getg() 2787 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 2788 // continue to use the current P 2789 _g_.m.p.ptr().status = _Prunning 2790 } else { 2791 // release the current P and acquire allp[0] 2792 if _g_.m.p != 0 { 2793 _g_.m.p.ptr().m = 0 2794 } 2795 _g_.m.p = 0 2796 _g_.m.mcache = nil 2797 p := allp[0] 2798 p.m = 0 2799 p.status = _Pidle 2800 acquirep(p) 2801 if trace.enabled { 2802 traceGoStart() 2803 } 2804 } 2805 var runnablePs *p 2806 for i := nprocs - 1; i >= 0; i-- { 2807 p := allp[i] 2808 if _g_.m.p.ptr() == p { 2809 continue 2810 } 2811 p.status = _Pidle 2812 if runqempty(p) { 2813 pidleput(p) 2814 } else { 2815 p.m.set(mget()) 2816 p.link.set(runnablePs) 2817 runnablePs = p 2818 } 2819 } 2820 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 2821 atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 2822 return runnablePs 2823 } 2824 2825 // Associate p and the current m. 2826 func acquirep(_p_ *p) { 2827 acquirep1(_p_) 2828 2829 // have p; write barriers now allowed 2830 _g_ := getg() 2831 _g_.m.mcache = _p_.mcache 2832 2833 if trace.enabled { 2834 traceProcStart() 2835 } 2836 } 2837 2838 // May run during STW, so write barriers are not allowed. 2839 //go:nowritebarrier 2840 func acquirep1(_p_ *p) { 2841 _g_ := getg() 2842 2843 if _g_.m.p != 0 || _g_.m.mcache != nil { 2844 throw("acquirep: already in go") 2845 } 2846 if _p_.m != 0 || _p_.status != _Pidle { 2847 id := int32(0) 2848 if _p_.m != 0 { 2849 id = _p_.m.ptr().id 2850 } 2851 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 2852 throw("acquirep: invalid p state") 2853 } 2854 _g_.m.p.set(_p_) 2855 _p_.m.set(_g_.m) 2856 _p_.status = _Prunning 2857 } 2858 2859 // Disassociate p and the current m. 
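// Returns the released P so the caller can pass it along; for example,
// entersyscallblock_handoff above does handoffp(releasep()).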
2860 func releasep() *p { 2861 _g_ := getg() 2862 2863 if _g_.m.p == 0 || _g_.m.mcache == nil { 2864 throw("releasep: invalid arg") 2865 } 2866 _p_ := _g_.m.p.ptr() 2867 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 2868 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 2869 throw("releasep: invalid p state") 2870 } 2871 if trace.enabled { 2872 traceProcStop(_g_.m.p.ptr()) 2873 } 2874 _g_.m.p = 0 2875 _g_.m.mcache = nil 2876 _p_.m = 0 2877 _p_.status = _Pidle 2878 return _p_ 2879 } 2880 2881 func incidlelocked(v int32) { 2882 lock(&sched.lock) 2883 sched.nmidlelocked += v 2884 if v > 0 { 2885 checkdead() 2886 } 2887 unlock(&sched.lock) 2888 } 2889 2890 // Check for deadlock situation. 2891 // The check is based on number of running M's, if 0 -> deadlock. 2892 func checkdead() { 2893 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 2894 // there are no running goroutines. The calling program is 2895 // assumed to be running. 2896 if islibrary || isarchive { 2897 return 2898 } 2899 2900 // If we are dying because of a signal caught on an already idle thread, 2901 // freezetheworld will cause all running threads to block. 2902 // And runtime will essentially enter into deadlock state, 2903 // except that there is a thread that will call exit soon. 2904 if panicking > 0 { 2905 return 2906 } 2907 2908 // -1 for sysmon 2909 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 2910 if run > 0 { 2911 return 2912 } 2913 if run < 0 { 2914 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 2915 throw("checkdead: inconsistent counts") 2916 } 2917 2918 grunning := 0 2919 lock(&allglock) 2920 for i := 0; i < len(allgs); i++ { 2921 gp := allgs[i] 2922 if isSystemGoroutine(gp) { 2923 continue 2924 } 2925 s := readgstatus(gp) 2926 switch s &^ _Gscan { 2927 case _Gwaiting: 2928 grunning++ 2929 case _Grunnable, 2930 _Grunning, 2931 _Gsyscall: 2932 unlock(&allglock) 2933 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 2934 throw("checkdead: runnable g") 2935 } 2936 } 2937 unlock(&allglock) 2938 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 2939 throw("no goroutines (main called runtime.Goexit) - deadlock!") 2940 } 2941 2942 // Maybe jump time forward for playground. 2943 gp := timejump() 2944 if gp != nil { 2945 casgstatus(gp, _Gwaiting, _Grunnable) 2946 globrunqput(gp) 2947 _p_ := pidleget() 2948 if _p_ == nil { 2949 throw("checkdead: no p for timer") 2950 } 2951 mp := mget() 2952 if mp == nil { 2953 newm(nil, _p_) 2954 } else { 2955 mp.nextp.set(_p_) 2956 notewakeup(&mp.park) 2957 } 2958 return 2959 } 2960 2961 getg().m.throwing = -1 // do not dump full stacks 2962 throw("all goroutines are asleep - deadlock!") 2963 } 2964 2965 // forcegcperiod is the maximum time in nanoseconds between garbage 2966 // collections. If we go this long without a garbage collection, one 2967 // is forced to run. 2968 // 2969 // This is a variable for testing purposes. It normally doesn't change. 2970 var forcegcperiod int64 = 2 * 60 * 1e9 2971 2972 func sysmon() { 2973 // If a heap span goes unused for 5 minutes after a garbage collection, 2974 // we hand it back to the operating system. 2975 scavengelimit := int64(5 * 60 * 1e9) 2976 2977 if debug.scavenge > 0 { 2978 // Scavenge-a-lot for testing. 
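// (In concrete units: force a GC every 10ms and allow scavenging every
// 20ms, instead of the normal 2 minutes and 5 minutes.)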
2979 forcegcperiod = 10 * 1e6 2980 scavengelimit = 20 * 1e6 2981 } 2982 2983 lastscavenge := nanotime() 2984 nscavenge := 0 2985 2986 lasttrace := int64(0) 2987 idle := 0 // how many cycles in succession we have not woken anybody up 2988 delay := uint32(0) 2989 for { 2990 if idle == 0 { // start with 20us sleep... 2991 delay = 20 2992 } else if idle > 50 { // start doubling the sleep after 1ms... 2993 delay *= 2 2994 } 2995 if delay > 10*1000 { // up to 10ms 2996 delay = 10 * 1000 2997 } 2998 usleep(delay) 2999 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic 3000 lock(&sched.lock) 3001 if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) { 3002 atomicstore(&sched.sysmonwait, 1) 3003 unlock(&sched.lock) 3004 // Make wake-up period small enough 3005 // for the sampling to be correct. 3006 maxsleep := forcegcperiod / 2 3007 if scavengelimit < forcegcperiod { 3008 maxsleep = scavengelimit / 2 3009 } 3010 notetsleep(&sched.sysmonnote, maxsleep) 3011 lock(&sched.lock) 3012 atomicstore(&sched.sysmonwait, 0) 3013 noteclear(&sched.sysmonnote) 3014 idle = 0 3015 delay = 20 3016 } 3017 unlock(&sched.lock) 3018 } 3019 // poll network if not polled for more than 10ms 3020 lastpoll := int64(atomicload64(&sched.lastpoll)) 3021 now := nanotime() 3022 unixnow := unixnanotime() 3023 if lastpoll != 0 && lastpoll+10*1000*1000 < now { 3024 cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 3025 gp := netpoll(false) // non-blocking - returns list of goroutines 3026 if gp != nil { 3027 // Need to decrement number of idle locked M's 3028 // (pretending that one more is running) before injectglist. 3029 // Otherwise it can lead to the following situation: 3030 // injectglist grabs all P's but before it starts M's to run the P's, 3031 // another M returns from syscall, finishes running its G, 3032 // observes that there is no work to do and no other running M's 3033 // and reports deadlock. 3034 incidlelocked(-1) 3035 injectglist(gp) 3036 incidlelocked(1) 3037 } 3038 } 3039 // retake P's blocked in syscalls 3040 // and preempt long running G's 3041 if retake(now) != 0 { 3042 idle = 0 3043 } else { 3044 idle++ 3045 } 3046 // check if we need to force a GC 3047 lastgc := int64(atomicload64(&memstats.last_gc)) 3048 if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 && atomicloaduint(&bggc.working) == 0 { 3049 lock(&forcegc.lock) 3050 forcegc.idle = 0 3051 forcegc.g.schedlink = 0 3052 injectglist(forcegc.g) 3053 unlock(&forcegc.lock) 3054 } 3055 // scavenge heap once in a while 3056 if lastscavenge+scavengelimit/2 < now { 3057 mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3058 lastscavenge = now 3059 nscavenge++ 3060 } 3061 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now { 3062 lasttrace = now 3063 schedtrace(debug.scheddetail > 0) 3064 } 3065 } 3066 } 3067 3068 var pdesc [_MaxGomaxprocs]struct { 3069 schedtick uint32 3070 schedwhen int64 3071 syscalltick uint32 3072 syscallwhen int64 3073 } 3074 3075 // forcePreemptNS is the time slice given to a G before it is 3076 // preempted.
3077 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3078 3079 func retake(now int64) uint32 { 3080 n := 0 3081 for i := int32(0); i < gomaxprocs; i++ { 3082 _p_ := allp[i] 3083 if _p_ == nil { 3084 continue 3085 } 3086 pd := &pdesc[i] 3087 s := _p_.status 3088 if s == _Psyscall { 3089 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3090 t := int64(_p_.syscalltick) 3091 if int64(pd.syscalltick) != t { 3092 pd.syscalltick = uint32(t) 3093 pd.syscallwhen = now 3094 continue 3095 } 3096 // On the one hand we don't want to retake Ps if there is no other work to do, 3097 // but on the other hand we want to retake them eventually 3098 // because they can prevent the sysmon thread from deep sleep. 3099 if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3100 continue 3101 } 3102 // Need to decrement number of idle locked M's 3103 // (pretending that one more is running) before the CAS. 3104 // Otherwise the M from which we retake can exit the syscall, 3105 // increment nmidle and report deadlock. 3106 incidlelocked(-1) 3107 if cas(&_p_.status, s, _Pidle) { 3108 if trace.enabled { 3109 traceGoSysBlock(_p_) 3110 traceProcStop(_p_) 3111 } 3112 n++ 3113 _p_.syscalltick++ 3114 handoffp(_p_) 3115 } 3116 incidlelocked(1) 3117 } else if s == _Prunning { 3118 // Preempt G if it's running for too long. 3119 t := int64(_p_.schedtick) 3120 if int64(pd.schedtick) != t { 3121 pd.schedtick = uint32(t) 3122 pd.schedwhen = now 3123 continue 3124 } 3125 if pd.schedwhen+forcePreemptNS > now { 3126 continue 3127 } 3128 preemptone(_p_) 3129 } 3130 } 3131 return uint32(n) 3132 } 3133 3134 // Tell all goroutines that they have been preempted and they should stop. 3135 // This function is purely best-effort. It can fail to inform a goroutine if a 3136 // processor just started running it. 3137 // No locks need to be held. 3138 // Returns true if preemption request was issued to at least one goroutine. 3139 func preemptall() bool { 3140 res := false 3141 for i := int32(0); i < gomaxprocs; i++ { 3142 _p_ := allp[i] 3143 if _p_ == nil || _p_.status != _Prunning { 3144 continue 3145 } 3146 if preemptone(_p_) { 3147 res = true 3148 } 3149 } 3150 return res 3151 } 3152 3153 // Tell the goroutine running on processor P to stop. 3154 // This function is purely best-effort. It can incorrectly fail to inform the 3155 // goroutine. It can inform the wrong goroutine. Even if it informs the 3156 // correct goroutine, that goroutine might ignore the request if it is 3157 // simultaneously executing newstack. 3158 // No lock needs to be held. 3159 // Returns true if preemption request was issued. 3160 // The actual preemption will happen at some point in the future 3161 // and will be indicated by the gp->status no longer being 3162 // Grunning. 3163 func preemptone(_p_ *p) bool { 3164 mp := _p_.m.ptr() 3165 if mp == nil || mp == getg().m { 3166 return false 3167 } 3168 gp := mp.curg 3169 if gp == nil || gp == mp.g0 { 3170 return false 3171 } 3172 3173 gp.preempt = true 3174 3175 // Every call in a goroutine checks for stack overflow by 3176 // comparing the current stack pointer to gp->stackguard0. 3177 // Setting gp->stackguard0 to StackPreempt folds 3178 // preemption into the normal stack overflow check.
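// (Added note: stackPreempt is above any valid stack pointer, so the next
// function prologue's stack check fails and enters morestack; newstack
// recognizes the sentinel and reschedules the goroutine via gopreempt_m
// above instead of growing the stack. Preemption is therefore cooperative:
// a goroutine that makes no function calls cannot be preempted this way.)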
3179 gp.stackguard0 = stackPreempt 3180 return true 3181 } 3182 3183 var starttime int64 3184 3185 func schedtrace(detailed bool) { 3186 now := nanotime() 3187 if starttime == 0 { 3188 starttime = now 3189 } 3190 3191 lock(&sched.lock) 3192 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 3193 if detailed { 3194 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 3195 } 3196 // We must be careful while reading data from P's, M's and G's. 3197 // Even if we hold schedlock, most data can be changed concurrently. 3198 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 3199 for i := int32(0); i < gomaxprocs; i++ { 3200 _p_ := allp[i] 3201 if _p_ == nil { 3202 continue 3203 } 3204 mp := _p_.m.ptr() 3205 h := atomicload(&_p_.runqhead) 3206 t := atomicload(&_p_.runqtail) 3207 if detailed { 3208 id := int32(-1) 3209 if mp != nil { 3210 id = mp.id 3211 } 3212 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 3213 } else { 3214 // In non-detailed mode format lengths of per-P run queues as: 3215 // [len1 len2 len3 len4] 3216 print(" ") 3217 if i == 0 { 3218 print("[") 3219 } 3220 print(t - h) 3221 if i == gomaxprocs-1 { 3222 print("]\n") 3223 } 3224 } 3225 } 3226 3227 if !detailed { 3228 unlock(&sched.lock) 3229 return 3230 } 3231 3232 for mp := allm; mp != nil; mp = mp.alllink { 3233 _p_ := mp.p.ptr() 3234 gp := mp.curg 3235 lockedg := mp.lockedg 3236 id1 := int32(-1) 3237 if _p_ != nil { 3238 id1 = _p_.id 3239 } 3240 id2 := int64(-1) 3241 if gp != nil { 3242 id2 = gp.goid 3243 } 3244 id3 := int64(-1) 3245 if lockedg != nil { 3246 id3 = lockedg.goid 3247 } 3248 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n") 3249 } 3250 3251 lock(&allglock) 3252 for gi := 0; gi < len(allgs); gi++ { 3253 gp := allgs[gi] 3254 mp := gp.m 3255 lockedm := gp.lockedm 3256 id1 := int32(-1) 3257 if mp != nil { 3258 id1 = mp.id 3259 } 3260 id2 := int32(-1) 3261 if lockedm != nil { 3262 id2 = lockedm.id 3263 } 3264 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 3265 } 3266 unlock(&allglock) 3267 unlock(&sched.lock) 3268 } 3269 3270 // Put mp on midle list. 3271 // Sched must be locked. 3272 // May run during STW, so write barriers are not allowed. 3273 //go:nowritebarrier 3274 func mput(mp *m) { 3275 mp.schedlink = sched.midle 3276 sched.midle.set(mp) 3277 sched.nmidle++ 3278 checkdead() 3279 } 3280 3281 // Try to get an m from midle list. 3282 // Sched must be locked. 3283 // May run during STW, so write barriers are not allowed. 3284 //go:nowritebarrier 3285 func mget() *m { 3286 mp := sched.midle.ptr() 3287 if mp != nil { 3288 sched.midle = mp.schedlink 3289 sched.nmidle-- 3290 } 3291 return mp 3292 } 3293 3294 // Put gp on the global runnable queue. 3295 // Sched must be locked. 3296 // May run during STW, so write barriers are not allowed. 
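// (Added note: the global queue is a singly linked list threaded through
// g.schedlink; runqhead and runqtail are guintptrs precisely so that this
// STW-safe code can update them without write barriers.)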
3297 //go:nowritebarrier 3298 func globrunqput(gp *g) { 3299 gp.schedlink = 0 3300 if sched.runqtail != 0 { 3301 sched.runqtail.ptr().schedlink.set(gp) 3302 } else { 3303 sched.runqhead.set(gp) 3304 } 3305 sched.runqtail.set(gp) 3306 sched.runqsize++ 3307 } 3308 3309 // Put gp at the head of the global runnable queue. 3310 // Sched must be locked. 3311 // May run during STW, so write barriers are not allowed. 3312 //go:nowritebarrier 3313 func globrunqputhead(gp *g) { 3314 gp.schedlink = sched.runqhead 3315 sched.runqhead.set(gp) 3316 if sched.runqtail == 0 { 3317 sched.runqtail.set(gp) 3318 } 3319 sched.runqsize++ 3320 } 3321 3322 // Put a batch of runnable goroutines on the global runnable queue. 3323 // Sched must be locked. 3324 func globrunqputbatch(ghead *g, gtail *g, n int32) { 3325 gtail.schedlink = 0 3326 if sched.runqtail != 0 { 3327 sched.runqtail.ptr().schedlink.set(ghead) 3328 } else { 3329 sched.runqhead.set(ghead) 3330 } 3331 sched.runqtail.set(gtail) 3332 sched.runqsize += n 3333 } 3334 3335 // Try to get a batch of G's from the global runnable queue. 3336 // Sched must be locked. 3337 func globrunqget(_p_ *p, max int32) *g { 3338 if sched.runqsize == 0 { 3339 return nil 3340 } 3341 3342 n := sched.runqsize/gomaxprocs + 1 3343 if n > sched.runqsize { 3344 n = sched.runqsize 3345 } 3346 if max > 0 && n > max { 3347 n = max 3348 } 3349 if n > int32(len(_p_.runq))/2 { 3350 n = int32(len(_p_.runq)) / 2 3351 } 3352 3353 sched.runqsize -= n 3354 if sched.runqsize == 0 { 3355 sched.runqtail = 0 3356 } 3357 3358 gp := sched.runqhead.ptr() 3359 sched.runqhead = gp.schedlink 3360 n-- 3361 for ; n > 0; n-- { 3362 gp1 := sched.runqhead.ptr() 3363 sched.runqhead = gp1.schedlink 3364 runqput(_p_, gp1, false) 3365 } 3366 return gp 3367 } 3368 3369 // Put p on the _Pidle list. 3370 // Sched must be locked. 3371 // May run during STW, so write barriers are not allowed. 3372 //go:nowritebarrier 3373 func pidleput(_p_ *p) { 3374 if !runqempty(_p_) { 3375 throw("pidleput: P has non-empty run queue") 3376 } 3377 _p_.link = sched.pidle 3378 sched.pidle.set(_p_) 3379 xadd(&sched.npidle, 1) // TODO: fast atomic 3380 } 3381 3382 // Try to get a p from the _Pidle list. 3383 // Sched must be locked. 3384 // May run during STW, so write barriers are not allowed. 3385 //go:nowritebarrier 3386 func pidleget() *p { 3387 _p_ := sched.pidle.ptr() 3388 if _p_ != nil { 3389 sched.pidle = _p_.link 3390 xadd(&sched.npidle, -1) // TODO: fast atomic 3391 } 3392 return _p_ 3393 } 3394 3395 // runqempty returns true if _p_ has no Gs on its local run queue. 3396 // Note that this test is generally racy. 3397 func runqempty(_p_ *p) bool { 3398 return _p_.runqhead == _p_.runqtail && _p_.runnext == 0 3399 } 3400 3401 // To shake out latent assumptions about scheduling order, 3402 // we introduce some randomness into scheduling decisions 3403 // when running with the race detector. 3404 // The need for this was made obvious by changing the 3405 // (deterministic) scheduling order in Go 1.5 and breaking 3406 // many poorly-written tests. 3407 // With the randomness here, as long as the tests pass 3408 // consistently with -race, they shouldn't have latent scheduling 3409 // assumptions. 3410 const randomizeScheduler = raceenabled 3411 3412 // runqput tries to put g on the local runnable queue. 3413 // If next is false, runqput adds g to the tail of the runnable queue. 3414 // If next is true, runqput puts g in the _p_.runnext slot. 3415 // If the run queue is full, runqput puts g on the global queue.
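// (Added note: the runnext slot gives the most recently readied g a chance
// to run next on the same P, which helps locality for ready/park pairs such
// as channel ping-pong; the stealing code respects it too, see the backoff
// in runqgrab below.)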
3416 // Executed only by the owner P. 3417 func runqput(_p_ *p, gp *g, next bool) { 3418 if randomizeScheduler && next && fastrand1()%2 == 0 { 3419 next = false 3420 } 3421 3422 if next { 3423 retryNext: 3424 oldnext := _p_.runnext 3425 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 3426 goto retryNext 3427 } 3428 if oldnext == 0 { 3429 return 3430 } 3431 // Kick the old runnext out to the regular run queue. 3432 gp = oldnext.ptr() 3433 } 3434 3435 retry: 3436 h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers 3437 t := _p_.runqtail 3438 if t-h < uint32(len(_p_.runq)) { 3439 _p_.runq[t%uint32(len(_p_.runq))] = gp 3440 atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 3441 return 3442 } 3443 if runqputslow(_p_, gp, h, t) { 3444 return 3445 } 3446 // the queue is not full, now the put above must succeed 3447 goto retry 3448 } 3449 3450 // Put g and a batch of work from local runnable queue on global queue. 3451 // Executed only by the owner P. 3452 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 3453 var batch [len(_p_.runq)/2 + 1]*g 3454 3455 // First, grab a batch from local queue. 3456 n := t - h 3457 n = n / 2 3458 if n != uint32(len(_p_.runq)/2) { 3459 throw("runqputslow: queue is not full") 3460 } 3461 for i := uint32(0); i < n; i++ { 3462 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))] 3463 } 3464 if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 3465 return false 3466 } 3467 batch[n] = gp 3468 3469 if randomizeScheduler { 3470 for i := uint32(1); i <= n; i++ { 3471 j := fastrand1() % (i + 1) 3472 batch[i], batch[j] = batch[j], batch[i] 3473 } 3474 } 3475 3476 // Link the goroutines. 3477 for i := uint32(0); i < n; i++ { 3478 batch[i].schedlink.set(batch[i+1]) 3479 } 3480 3481 // Now put the batch on global queue. 3482 lock(&sched.lock) 3483 globrunqputbatch(batch[0], batch[n], int32(n+1)) 3484 unlock(&sched.lock) 3485 return true 3486 } 3487 3488 // Get g from local runnable queue. 3489 // If inheritTime is true, gp should inherit the remaining time in the 3490 // current time slice. Otherwise, it should start a new time slice. 3491 // Executed only by the owner P. 3492 func runqget(_p_ *p) (gp *g, inheritTime bool) { 3493 // If there's a runnext, it's the next G to run. 3494 for { 3495 next := _p_.runnext 3496 if next == 0 { 3497 break 3498 } 3499 if _p_.runnext.cas(next, 0) { 3500 return next.ptr(), true 3501 } 3502 } 3503 3504 for { 3505 h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers 3506 t := _p_.runqtail 3507 if t == h { 3508 return nil, false 3509 } 3510 gp := _p_.runq[h%uint32(len(_p_.runq))] 3511 if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 3512 return gp, false 3513 } 3514 } 3515 } 3516 3517 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 3518 // Batch is a ring buffer starting at batchHead. 3519 // Returns number of grabbed goroutines. 3520 // Can be executed by any P. 3521 func runqgrab(_p_ *p, batch *[256]*g, batchHead uint32, stealRunNextG bool) uint32 { 3522 for { 3523 h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers 3524 t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer 3525 n := t - h 3526 n = n - n/2 3527 if n == 0 { 3528 if stealRunNextG { 3529 // Try to steal from _p_.runnext. 3530 if next := _p_.runnext; next != 0 { 3531 // Sleep to ensure that _p_ isn't about to run the g we 3532 // are about to steal.
3533 // The important use case here is when the g running on _p_ 3534 // ready()s another g and then almost immediately blocks. 3535 // Instead of stealing runnext in this window, back off 3536 // to give _p_ a chance to schedule runnext. This will avoid 3537 // thrashing gs between different Ps. 3538 usleep(100) 3539 if !_p_.runnext.cas(next, 0) { 3540 continue 3541 } 3542 batch[batchHead%uint32(len(batch))] = next.ptr() 3543 return 1 3544 } 3545 } 3546 return 0 3547 } 3548 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 3549 continue 3550 } 3551 for i := uint32(0); i < n; i++ { 3552 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 3553 batch[(batchHead+i)%uint32(len(batch))] = g 3554 } 3555 if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 3556 return n 3557 } 3558 } 3559 } 3560 3561 // Steal half of elements from local runnable queue of p2 3562 // and put onto local runnable queue of p. 3563 // Returns one of the stolen elements (or nil if failed). 3564 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 3565 t := _p_.runqtail 3566 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 3567 if n == 0 { 3568 return nil 3569 } 3570 n-- 3571 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))] 3572 if n == 0 { 3573 return gp 3574 } 3575 h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers 3576 if t-h+n >= uint32(len(_p_.runq)) { 3577 throw("runqsteal: runq overflow") 3578 } 3579 atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 3580 return gp 3581 } 3582 3583 func testSchedLocalQueue() { 3584 _p_ := new(p) 3585 gs := make([]g, len(_p_.runq)) 3586 for i := 0; i < len(_p_.runq); i++ { 3587 if g, _ := runqget(_p_); g != nil { 3588 throw("runq is not empty initially") 3589 } 3590 for j := 0; j < i; j++ { 3591 runqput(_p_, &gs[i], false) 3592 } 3593 for j := 0; j < i; j++ { 3594 if g, _ := runqget(_p_); g != &gs[i] { 3595 print("bad element at iter ", i, "/", j, "\n") 3596 throw("bad element") 3597 } 3598 } 3599 if g, _ := runqget(_p_); g != nil { 3600 throw("runq is not empty afterwards") 3601 } 3602 } 3603 } 3604 3605 func testSchedLocalQueueSteal() { 3606 p1 := new(p) 3607 p2 := new(p) 3608 gs := make([]g, len(p1.runq)) 3609 for i := 0; i < len(p1.runq); i++ { 3610 for j := 0; j < i; j++ { 3611 gs[j].sig = 0 3612 runqput(p1, &gs[j], false) 3613 } 3614 gp := runqsteal(p2, p1, true) 3615 s := 0 3616 if gp != nil { 3617 s++ 3618 gp.sig++ 3619 } 3620 for { 3621 gp, _ = runqget(p2) 3622 if gp == nil { 3623 break 3624 } 3625 s++ 3626 gp.sig++ 3627 } 3628 for { 3629 gp, _ = runqget(p1) 3630 if gp == nil { 3631 break 3632 } 3633 gp.sig++ 3634 } 3635 for j := 0; j < i; j++ { 3636 if gs[j].sig != 1 { 3637 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n") 3638 throw("bad element") 3639 } 3640 } 3641 if s != i/2 && s != i/2+1 { 3642 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n") 3643 throw("bad steal") 3644 } 3645 } 3646 } 3647 3648 func setMaxThreads(in int) (out int) { 3649 lock(&sched.lock) 3650 out = int(sched.maxmcount) 3651 sched.maxmcount = int32(in) 3652 checkmcount() 3653 unlock(&sched.lock) 3654 return 3655 } 3656 3657 func haveexperiment(name string) bool { 3658 x := goexperiment 3659 for x != "" { 3660 xname := "" 3661 i := index(x, ",") 3662 if i < 0 { 3663 xname, x = x, "" 3664 } else { 3665 xname, x = x[:i], x[i+1:] 3666 } 3667 if xname == name { 3668 return true 3669 } 3670 } 3671 return false 3672 } 3673 3674 //go:nosplit 3675 func procPin() int { 3676 _g_ := getg() 3677 mp := 
_g_.m 3678 3679 mp.locks++ 3680 return int(mp.p.ptr().id) 3681 } 3682 3683 //go:nosplit 3684 func procUnpin() { 3685 _g_ := getg() 3686 _g_.m.locks-- 3687 } 3688 3689 //go:linkname sync_runtime_procPin sync.runtime_procPin 3690 //go:nosplit 3691 func sync_runtime_procPin() int { 3692 return procPin() 3693 } 3694 3695 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 3696 //go:nosplit 3697 func sync_runtime_procUnpin() { 3698 procUnpin() 3699 } 3700 3701 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 3702 //go:nosplit 3703 func sync_atomic_runtime_procPin() int { 3704 return procPin() 3705 } 3706 3707 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 3708 //go:nosplit 3709 func sync_atomic_runtime_procUnpin() { 3710 procUnpin() 3711 } 3712 3713 // Active spinning for sync.Mutex. 3714 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 3715 //go:nosplit 3716 func sync_runtime_canSpin(i int) bool { 3717 // sync.Mutex is cooperative, so we are conservative with spinning. 3718 // Spin only a few times and only if running on a multicore machine and 3719 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 3720 // As opposed to runtime mutex we don't do passive spinning here, 3721 // because there can be work on the global runq or on other Ps. 3722 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 3723 return false 3724 } 3725 if p := getg().m.p.ptr(); !runqempty(p) { 3726 return false 3727 } 3728 return true 3729 } 3730 3731 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 3732 //go:nosplit 3733 func sync_runtime_doSpin() { 3734 procyield(active_spin_cnt) 3735 }
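// A hedged sketch (not code from package sync) of how the sync.Mutex slow
// path is expected to drive the two hooks above; runtime_canSpin and
// runtime_doSpin are the sync-side names established by the go:linkname
// directives, and mutexLocked is sync's own state bit:
//
//	iter := 0
//	for runtime_canSpin(iter) {
//		old := m.state
//		if old&mutexLocked == 0 && atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
//			return // acquired the lock while spinning
//		}
//		runtime_doSpin() // procyield(active_spin_cnt)
//		iter++
//	}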