// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

var (
	m0 m
	g0 g // some initialization of g0 and m0 is performed in asm_amd64.s
)

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		// TODO: document how the race detector context is used.
		_g_.racectx = raceinit()
	}
	// At most 10000 threads.
	sched.maxmcount = 10000

	// Cache the framepointer experiment. This affects stack unwinding.
	framepointer_enabled = haveexperiment("framepointer")

	tracebackinit()    // cache the addresses of a few runtime functions
	moduledataverify() // check that the module data is valid
	stackinit()        // initialize the global stack caches
	mallocinit()       // initialize memory management
	mcommoninit(_g_.m)

	goargs()         // parse and copy args, for os.runtime_args
	goenvs()         // parse and copy env, for syscall.runtime_envs
	parsedebugvars() // parse the GODEBUG options that configure debugging behavior
	gcinit()         // initialize the garbage collector

	sched.lastpoll = uint64(nanotime())
	procs := int(ncpu)
	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
		if n > _MaxGomaxprocs {
			n = _MaxGomaxprocs
		}
		procs = n
	}
	if procresize(int32(procs)) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}

func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

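// Illustrative sketch (ordinary user code, not runtime internals): the
// GOMAXPROCS clamp in schedinit above is what runtime.GOMAXPROCS later
// reports; passing 0 queries the value without changing it.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0)) // query only
//	}
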
func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	lock(&sched.lock)
	mp.id = sched.mcount
	sched.mcount++
	checkmcount()
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)
}

// Mark gp ready to run.
func ready(gp *g, traceskip int) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, true)
	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

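// Illustrative sketch (user code): ready is the path taken when a parked
// goroutine is woken, for example by a channel send:
//
//	ch := make(chan int)
//	go func() { <-ch }() // receiver parks in _Gwaiting
//	ch <- 1              // sender's runtime code readies it: _Gwaiting -> _Grunnable
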
func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	if n > sched.nmidle+1 { // one M is currently running
		n = sched.nmidle + 1
	}
	unlock(&sched.lock)
	return n
}

func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	n -= sched.nmidle + 1 // one M is currently running
	unlock(&sched.lock)
	return n > 0
}

func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomicstore(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus and casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomicload(&gp.atomicstatus)
}

// Ownership of gscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = cas(&gp.atomicstatus, oldval, newval)
		}
	case _Gscanenqueue:
		if newval == _Gwaiting {
			success = cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return cas(&gp.atomicstatus, oldval, newval)
		}
	case _Grunning:
		if newval == _Gscanrunning || newval == _Gscanenqueue {
			return cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

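// Illustrative sketch (plain user code with sync/atomic, not the runtime's
// own primitives; state and lockedBit are hypothetical): the _Gscan bit
// behaves like a try-lock taken and released with CAS:
//
//	const lockedBit = 1 << 31
//
//	var state uint32
//
//	func tryLock(old uint32) bool { // acquire, like castogscanstatus
//		return atomic.CompareAndSwapUint32(&state, old, old|lockedBit)
//	}
//
//	func unlock(old uint32) { // release, like casfrom_Gscanstatus
//		if !atomic.CompareAndSwapUint32(&state, old|lockedBit, old) {
//			panic("state changed while locked")
//		}
//	}
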
// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
		throw("casgstatus")
	}

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for !cas(&gp.atomicstatus, oldval, newval) {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

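// Illustrative sketch (generic pattern with sync/atomic; state, stateA,
// stateB and stateC are hypothetical): when the expected old value can
// legitimately move on without us, re-read it on every attempt instead of
// spinning on a single snapshot, which is exactly what casgcopystack does:
//
//	for {
//		old := atomic.LoadUint32(&state)
//		if old != stateA && old != stateB {
//			panic("unexpected state")
//		}
//		if atomic.CompareAndSwapUint32(&state, old, stateC) {
//			return old // caller learns which state it displaced
//		}
//	}
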
// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.
	// It is important that the scan happens exactly once: if called twice,
	// the installation of stack barriers will detect the double scan and die.

	gp.gcscandone = false

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
	for !gp.gcscandone {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					// Coordinate with traceback
					// in sigprof.
					for !cas(&gp.stackLock, 0, 1) {
						osyield()
					}
					scanstack(gp)
					atomicstore(&gp.stackLock, 0)
					gp.gcscandone = true
				}
				restartg(gp)
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)

	// Scan is now completed.
	// Goroutine now needs to be made runnable.
	// We put it on the global run queue; ready blocks on the global scheduler lock.
	case _Gscanenqueue:
		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
		if gp != getg().m.curg {
			throw("processing Gscanenqueue on wrong m")
		}
		dropg()
		ready(gp, 0)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

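// Illustrative sketch (user code; which public calls stop the world is an
// implementation detail, so treat this as an assumption): some runtime
// entry points briefly go through stopTheWorld, for example reading memory
// statistics:
//
//	var ms runtime.MemStats
//	runtime.ReadMemStats(&ms) // world is stopped while the stats are copied
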
// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first, and should then call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, false)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomicstore(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}
	if sched.stopwait != 0 {
		throw("stopTheWorld: not stopped")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.status != _Pgcstop {
			throw("stopTheWorld: not stopped")
		}
	}
}

func mhelpgc() {
	_g_ := getg()
	_g_.m.helpgc = -1
}

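// Illustrative sketch (user-level analogue, not runtime machinery):
// worldsema is a binary semaphore. In ordinary Go code the same shape is a
// channel with one slot:
//
//	sem := make(chan struct{}, 1)
//
//	sem <- struct{}{} // acquire: blocks while another holder exists
//	// ... exclusive section, the analogue of "world is stopped" ...
//	<-sem // release
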
func startTheWorldWithSema() {
	_g_ := getg()

	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	injectglist(gp)
	add := needaddgcproc()
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// Called to start an M: set up the thread and enter the scheduling loop.
//go:nosplit
func mstart() {
	_g_ := getg()

	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * stackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		// Create an extra M for callbacks on threads not created by Go.
		if iscgo && !cgoHasExtraM {
			cgoHasExtraM = true
			newextram()
		}
		initsig()
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		_g_.m.helpgc = 0
		stopm()
	} else if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

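// Illustrative sketch (simplified shape, not the real control flow): once
// mstart1 calls schedule, each M effectively loops forever doing
//
//	for {
//		gp, inheritTime := findrunnable() // may block until work exists
//		execute(gp, inheritTime)          // runs gp; control re-enters schedule via mcall
//	}
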
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		if p != _p_ {
			atomicstore(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition into _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

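// Illustrative sketch (generic pattern with channels, not runtime
// machinery; workers and their task channels are hypothetical): a ragged
// barrier visits every worker at its next convenient point instead of
// stopping all of them at once:
//
//	func raggedBarrier(workers []chan func(), fn func(i int)) {
//		done := make(chan struct{})
//		for i, w := range workers {
//			i := i
//			w <- func() { fn(i); done <- struct{}{} } // runs at the worker's next "safe point"
//		}
//		for range workers {
//			<-done // barrier completes only after every worker ran fn
//		}
//	}
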
// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}
	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * stackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	unlockextra(mp.schedlink.ptr())

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	msigsave(mp)
	// Initialize this thread to use the m.
	asminit()
	minit()
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + _PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	// malg returns status as Gidle, change to Gsyscall before adding to allg
	// where GC will see it.
	casgstatus(gp, _Gidle, _Gsyscall)
	gp.m = mp
	mp.curg = gp
	mp.locked = _LockInternal
	mp.lockedg = gp
	gp.lockedm = mp
	gp.goid = int64(xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram))
	}
	// put on allg for garbage collector
	allgadd(gp)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Undo whatever initialization minit did during needm.
	unminit()

	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m
	mnext := lockextra(true)
	mp.schedlink.set(mnext)

	setg(nil)
	unlockextra(mp)
}

var extram uintptr

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	for {
		old := atomicloaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			usleep(1)
			continue
		}
		if casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}

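// Illustrative sketch (plain Go with sync/atomic; head, sentinel and the
// node encoding are hypothetical): lockextra avoids the ABA problem of a
// bare CAS pop by parking a sentinel value in the head pointer, so the
// whole list is owned until a real head is stored back:
//
//	const sentinel = uintptr(1)
//
//	var head uintptr // 0 = empty, 1 = locked, otherwise a node pointer
//
//	func lockList() uintptr {
//		for {
//			old := atomic.LoadUintptr(&head)
//			if old == sentinel {
//				runtime.Gosched() // someone else holds the list
//				continue
//			}
//			if atomic.CompareAndSwapUintptr(&head, old, sentinel) {
//				return old // caller now owns the entire list
//			}
//		}
//	}
//
//	func unlockList(newHead uintptr) {
//		atomic.StoreUintptr(&head, newHead)
//	}
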
// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)
	mp.nextp.set(_p_)
	msigsave(mp)
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(funcPC(mstart))
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		return
	}
	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	if _g_.m.p != 0 {
		throw("stopm holding p")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}

retry:
	lock(&sched.lock)
	mput(_g_.m)
	unlock(&sched.lock)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {
		gchelper()
		_g_.m.helpgc = 0
		_g_.m.mcache = nil
		_g_.m.p = 0
		goto retry
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

func mspinning() {
	gp := getg()
	if !runqempty(gp.m.nextp.ptr()) {
		// Something (presumably the GC) was readied while the
		// runtime was starting up this M, so the M is no
		// longer spinning.
		if int32(xadd(&sched.nmspinning, -1)) < 0 {
			throw("mspinning: nmspinning underflowed")
		}
	} else {
		gp.m.spinning = true
	}
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there is no idle P, does nothing.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			if spinning {
				xadd(&sched.nmspinning, -1)
			}
			return
		}
	}
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		var fn func()
		if spinning {
			fn = mspinning
		}
		newm(fn, _p_)
		return
	}
	if mp.spinning {
		throw("startm: m is spinning")
	}
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	mp.spinning = spinning
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//go:nowritebarrier
func handoffp(_p_ *p) {
	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {
		startm(_p_, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
		startm(_p_, true)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting != 0 {
		_p_.status = _Pgcstop
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if _p_.runSafePointFn != 0 && cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	pidleput(_p_)
	unlock(&sched.lock)
}

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
func wakep() {
	// be conservative about spinning threads
	if !cas(&sched.nmspinning, 0, 1) {
		return
	}
	startm(nil, true)
}

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	_g_ := getg()

	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
		throw("stoplockedm: inconsistent locking")
	}
	if _g_.m.p != 0 {
		// Schedule another M to run this p.
		_p_ := releasep()
		handoffp(_p_)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	status := readgstatus(_g_.m.lockedg)
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
		dumpgstatus(_g_)
		throw("stoplockedm: not runnable")
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

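// Illustrative sketch (user code): stoplockedm and startlockedm below serve
// goroutines pinned to a thread with runtime.LockOSThread:
//
//	go func() {
//		runtime.LockOSThread() // this goroutine now always runs on this M
//		defer runtime.UnlockOSThread()
//		// thread-affine work, e.g. libraries that rely on thread-local state
//	}()
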
// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func startlockedm(gp *g) {
	_g_ := getg()

	mp := gp.lockedm
	if mp == _g_.m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	_p_ := releasep()
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	_g_ := getg()

	if sched.gcwaiting == 0 {
		throw("gcstopm: not waiting for gc")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}
	_p_ := releasep()
	lock(&sched.lock)
	_p_.status = _Pgcstop
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
func execute(gp *g, inheritTime bool) {
	_g_ := getg()

	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + _StackGuard
	if !inheritTime {
		_g_.m.p.ptr().schedtick++
	}
	_g_.m.curg = gp
	gp.m = _g_.m

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if _g_.m.profilehz != hz {
		resetcpuprofiler(hz)
	}

	if trace.enabled {
		// GoSysExit has to happen when we have a P, but before GoStart.
		// So we emit it here.
		if gp.syscallsp != 0 && gp.sysblocktraced {
			traceGoSysExit(gp.sysexitseq, gp.sysexitticks)
		}
		traceGoStart()
	}

	gogo(&gp.sched)
}

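// Illustrative sketch (user code): a goroutine can also give up its time
// slice voluntarily; runtime.Gosched funnels into goschedImpl further below:
//
//	for i := 0; i < 1e6; i++ {
//		if i%1024 == 0 {
//			runtime.Gosched() // yield: _Grunning -> _Grunnable, back on the global runq
//		}
//	}
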
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
func findrunnable() (gp *g, inheritTime bool) {
	_g_ := getg()

top:
	if sched.gcwaiting != 0 {
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}
	if fingwait && fingwake {
		if gp := wakefing(); gp != nil {
			ready(gp, 0)
		}
	}

	// local runq
	if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil {
		return gp, inheritTime
	}

	// global runq
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there is a thread blocked in netpoll already.
	// If there is any kind of logical race with that blocked thread
	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
	// this thread will do blocking netpoll below anyway.
	if netpollinited() && sched.lastpoll != 0 {
		if gp := netpoll(false); gp != nil { // non-blocking
			// netpoll returns list of goroutines linked by schedlink.
			injectglist(gp.schedlink.ptr())
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.enabled {
				traceGoUnpark(gp, 0)
			}
			return gp, false
		}
	}

	// If number of spinning M's >= number of busy P's, block.
	// This is necessary to prevent excessive CPU consumption
	// when GOMAXPROCS>>1 but the program parallelism is low.
	if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
		goto stop
	}
	if !_g_.m.spinning {
		_g_.m.spinning = true
		xadd(&sched.nmspinning, 1)
	}
	// random steal from other P's
	for i := 0; i < int(4*gomaxprocs); i++ {
		if sched.gcwaiting != 0 {
			goto top
		}
		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
		var gp *g
		if _p_ == _g_.m.p.ptr() {
			gp, _ = runqget(_p_)
		} else {
			stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g
			gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG)
		}
		if gp != nil {
			return gp, false
		}
	}

stop:

	// We have nothing to do. If we're in the GC mark phase and can
	// safely scan and blacken objects, run idle-time marking
	// rather than give up the P.
	if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != nil && gcMarkWorkAvailable(_p_) {
		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
		gp := _p_.gcBgMarkWorker
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.enabled {
			traceGoUnpark(gp, 0)
		}
		return gp, false
	}

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		return gp, false
	}
	_p_ := releasep()
	pidleput(_p_)
	unlock(&sched.lock)
	if _g_.m.spinning {
		_g_.m.spinning = false
		xadd(&sched.nmspinning, -1)
	}

	// check all runqueues once again
	for i := 0; i < int(gomaxprocs); i++ {
		_p_ := allp[i]
		if _p_ != nil && !runqempty(_p_) {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				goto top
			}
			break
		}
	}

	// poll network
	if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
		if _g_.m.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if _g_.m.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		gp := netpoll(true) // block until new work is available
		atomicstore64(&sched.lastpoll, uint64(nanotime()))
		if gp != nil {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				injectglist(gp.schedlink.ptr())
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.enabled {
					traceGoUnpark(gp, 0)
				}
				return gp, false
			}
			injectglist(gp)
		}
	}
	stopm()
	goto top
}

func resetspinning() {
	_g_ := getg()

	var nmspinning uint32
	if _g_.m.spinning {
		_g_.m.spinning = false
		nmspinning = xadd(&sched.nmspinning, -1)
		if int32(nmspinning) < 0 { // nmspinning is unsigned; check for wraparound as in mspinning
			throw("findrunnable: negative nmspinning")
		}
	} else {
		nmspinning = atomicload(&sched.nmspinning)
	}

	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
	// so see if we need to wakeup another P here.
	if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
		wakep()
	}
}

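// Illustrative sketch (generic work-stealing shape; workers, rng, steal and
// park are hypothetical): the steal loop above tries random victims a
// bounded number of times before parking:
//
//	for i := 0; i < 4*len(workers); i++ {
//		victim := workers[rng.Intn(len(workers))]
//		if task, ok := victim.steal(); ok {
//			return task
//		}
//	}
//	park() // nothing found anywhere; block until new work arrives
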
// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.
func injectglist(glist *g) {
	if glist == nil {
		return
	}
	if trace.enabled {
		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
			traceGoUnpark(gp, 0)
		}
	}
	lock(&sched.lock)
	var n int
	for n = 0; glist != nil; n++ {
		gp := glist
		glist = gp.schedlink.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
		globrunqput(gp)
	}
	unlock(&sched.lock)
	for ; n != 0 && sched.npidle != 0; n-- {
		startm(nil, false)
	}
}

// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
func schedule() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("schedule: holding locks")
	}

	if _g_.m.lockedg != nil {
		stoplockedm()
		execute(_g_.m.lockedg, false) // Never returns.
	}

top:
	if sched.gcwaiting != 0 {
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}

	var gp *g
	var inheritTime bool
	if trace.enabled || trace.shutdown {
		gp = traceReader()
		if gp != nil {
			casgstatus(gp, _Gwaiting, _Grunnable)
			traceGoUnpark(gp, 0)
			resetspinning()
		}
	}
	if gp == nil && gcBlackenEnabled != 0 {
		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
		if gp != nil {
			resetspinning()
		}
	}
	if gp == nil {
		// Check the global runnable queue once in a while to ensure fairness.
		// Otherwise two goroutines can completely occupy the local runqueue
		// by constantly respawning each other.
		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
			lock(&sched.lock)
			gp = globrunqget(_g_.m.p.ptr(), 1)
			unlock(&sched.lock)
			if gp != nil {
				resetspinning()
			}
		}
	}
	if gp == nil {
		gp, inheritTime = runqget(_g_.m.p.ptr())
		if gp != nil && _g_.m.spinning {
			throw("schedule: spinning with local work")
		}
	}
	if gp == nil {
		gp, inheritTime = findrunnable() // blocks until work is available
		resetspinning()
	}

	if gp.lockedm != nil {
		// Hands off own p to the locked m,
		// then blocks waiting for a new p.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}

// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
	_g_ := getg()

	if _g_.m.lockedg == nil {
		_g_.m.curg.m = nil
		_g_.m.curg = nil
	}
}

func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}

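// Illustrative sketch (user code): the schedtick%61 check in schedule above
// guards against workloads like this pair, which could otherwise keep each
// other in one P's local runq indefinitely and starve the global queue:
//
//	var ping, pong func()
//	ping = func() { go pong() }
//	pong = func() { go ping() }
//	go ping()
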
// park continuation on g0.
func park_m(gp *g) {
	_g_ := getg()

	if trace.enabled {
		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
	}

	casgstatus(gp, _Grunning, _Gwaiting)
	dropg()

	if _g_.m.waitunlockf != nil {
		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
		ok := fn(gp, _g_.m.waitlock)
		_g_.m.waitunlockf = nil
		_g_.m.waitlock = nil
		if !ok {
			if trace.enabled {
				traceGoUnpark(gp, 2)
			}
			casgstatus(gp, _Gwaiting, _Grunnable)
			execute(gp, true) // Schedule it back, never returns.
		}
	}
	schedule()
}

func goschedImpl(gp *g) {
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	casgstatus(gp, _Grunning, _Grunnable)
	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	schedule()
}

// Gosched continuation on g0.
func gosched_m(gp *g) {
	if trace.enabled {
		traceGoSched()
	}
	goschedImpl(gp)
}

func gopreempt_m(gp *g) {
	if trace.enabled {
		traceGoPreempt()
	}
	goschedImpl(gp)
}

// Finishes execution of the current goroutine.
func goexit1() {
	if raceenabled {
		racegoend()
	}
	if trace.enabled {
		traceGoEnd()
	}
	mcall(goexit0)
}

// goexit continuation on g0.
func goexit0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Grunning, _Gdead)
	gp.m = nil
	gp.lockedm = nil
	_g_.m.lockedg = nil
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = ""
	gp.param = nil

	dropg()

	if _g_.m.locked&^_LockExternal != 0 {
		print("invalid m->locked = ", _g_.m.locked, "\n")
		throw("internal lockOSThread error")
	}
	_g_.m.locked = 0
	gfput(_g_.m.p.ptr(), gp)
	schedule()
}

//go:nosplit
//go:nowritebarrier
func save(pc, sp uintptr) {
	_g_ := getg()

	_g_.sched.pc = pc
	_g_.sched.sp = sp
	_g_.sched.lr = 0
	_g_.sched.ret = 0
	_g_.sched.ctxt = nil
	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
}

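// Illustrative sketch (user code; assumes an fmt import): runtime.Goexit
// ends only the calling goroutine via the goexit path above, still running
// its defers:
//
//	go func() {
//		defer fmt.Println("deferred calls still run")
//		runtime.Goexit() // goroutine finishes here; status becomes _Gdead
//	}()
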
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use systemstack
// to run bigger things on the system stack.
//
// reentersyscall is the entry point used by cgo callbacks, where explicitly
// saved SP and PC are restored. This is needed when exitsyscall will be called
// from a function further up in the call stack than the parent, as g->syscallsp
// must always point to a valid stack frame. entersyscall below is the normal
// entry point for syscalls, which obtains the SP and PC from the caller.
//
// Syscall tracing:
// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
// If the syscall does not block, that is it, we do not emit any other events.
// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
// we remember the current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled,
// because tracing can be enabled in the middle of a syscall. We don't want the wait to hang.
//
//go:nosplit
func reentersyscall(pc, sp uintptr) {
	_g_ := getg()

	// Disable preemption because during this function g is in Gsyscall status,
	// but can have inconsistent g->sched, do not let GC observe it.
	_g_.m.locks++

	if trace.enabled {
		systemstack(traceGoSysCall)
	}

	// Entersyscall must not call any function that might split/grow the stack.
	// (See details in comment above.)
	// Catch calls that might, by replacing the stack guard with something that
	// will trip any stack check and leaving a flag to tell newstack to die.
	_g_.stackguard0 = stackPreempt
	_g_.throwsplit = true

	// Leave SP around for GC and traceback.
	save(pc, sp)
	_g_.syscallsp = sp
	_g_.syscallpc = pc
	casgstatus(_g_, _Grunning, _Gsyscall)
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
		systemstack(entersyscall_sysmon)
		save(pc, sp)
	}

	if _g_.m.p.ptr().runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack
		systemstack(runSafePointFn)
		save(pc, sp)
	}

	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
	_g_.sysblocktraced = true
	_g_.m.mcache = nil
	_g_.m.p.ptr().m = 0
	atomicstore(&_g_.m.p.ptr().status, _Psyscall)
	if sched.gcwaiting != 0 {
		systemstack(entersyscall_gcwait)
		save(pc, sp)
	}

	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
	// We set _StackGuard to StackPreempt so that first split stack check calls morestack.
	// Morestack detects this case and throws.
	_g_.stackguard0 = stackPreempt
	_g_.m.locks--
}

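// Illustrative sketch (user code; fd and buf are hypothetical): every call
// made through the syscall package is bracketed this way, invisibly to the
// caller:
//
//	n, err := syscall.Read(fd, buf) // entersyscall ... read(2) ... exitsyscall
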
// Standard syscall entry used by the go syscall library and normal cgo calls.
//go:nosplit
func entersyscall(dummy int32) {
	reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
}

func entersyscall_sysmon() {
	lock(&sched.lock)
	if atomicload(&sched.sysmonwait) != 0 {
		atomicstore(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}

func entersyscall_gcwait() {
	_g_ := getg()
	_p_ := _g_.m.p.ptr()

	lock(&sched.lock)
	if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) {
		if trace.enabled {
			traceGoSysBlock(_p_)
			traceProcStop(_p_)
		}
		_p_.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}

// The same as entersyscall(), but with a hint that the syscall is blocking.
//go:nosplit
func entersyscallblock(dummy int32) {
	_g_ := getg()

	_g_.m.locks++ // see comment in entersyscall
	_g_.throwsplit = true
	_g_.stackguard0 = stackPreempt // see comment in entersyscall
	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
	_g_.sysblocktraced = true
	_g_.m.p.ptr().syscalltick++

	// Leave SP around for GC and traceback.
	pc := getcallerpc(unsafe.Pointer(&dummy))
	sp := getcallersp(unsafe.Pointer(&dummy))
	save(pc, sp)
	_g_.syscallsp = _g_.sched.sp
	_g_.syscallpc = _g_.sched.pc
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		sp1 := sp
		sp2 := _g_.sched.sp
		sp3 := _g_.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	casgstatus(_g_, _Grunning, _Gsyscall)
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	systemstack(entersyscallblock_handoff)

	// Resave for traceback during blocked call.
	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))

	_g_.m.locks--
}

func entersyscallblock_handoff() {
	if trace.enabled {
		traceGoSysCall()
		traceGoSysBlock(getg().m.p.ptr())
	}
	handoffp(releasep())
}
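// Reader annotation (not part of proc1.go): a sketch of why the P
// handoff above matters. With GOMAXPROCS=1, a goroutine blocked in a
// read(2) does not stall the program: its P is released (entersyscall
// plus sysmon's retake, or entersyscallblock for known-blocking calls)
// and other goroutines keep running. Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//		"runtime"
//		"time"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		r, w, _ := os.Pipe()
//		go func() {
//			buf := make([]byte, 1)
//			r.Read(buf) // blocking syscall: enters via entersyscall
//			fmt.Println("read returned")
//		}()
//		for i := 0; i < 3; i++ {
//			fmt.Println("still scheduling", i) // runs while the reader is blocked
//			time.Sleep(10 * time.Millisecond)
//		}
//		w.Write([]byte{1})
//		time.Sleep(10 * time.Millisecond)
//	}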
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//go:nosplit
func exitsyscall(dummy int32) {
	_g_ := getg()

	_g_.m.locks++ // see comment in entersyscall
	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}

	_g_.waitsince = 0
	oldp := _g_.m.p.ptr()
	if exitsyscallfast() {
		if _g_.m.mcache == nil {
			throw("lost mcache")
		}
		if trace.enabled {
			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
				systemstack(traceGoStart)
			}
		}
		// There's a cpu for us, so we can run.
		_g_.m.p.ptr().syscalltick++
		// We need to cas the status and scan before resuming...
		casgstatus(_g_, _Gsyscall, _Grunning)

		// Garbage collector isn't running (since we are),
		// so okay to clear syscallsp.
		_g_.syscallsp = 0
		_g_.m.locks--
		if _g_.preempt {
			// restore the preemption request in case we've cleared it in newstack
			_g_.stackguard0 = stackPreempt
		} else {
			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
			_g_.stackguard0 = _g_.stack.lo + _StackGuard
		}
		_g_.throwsplit = false
		return
	}

	_g_.sysexitticks = 0
	_g_.sysexitseq = 0
	if trace.enabled {
		// Wait till traceGoSysBlock event is emitted.
		// This ensures consistency of the trace (the goroutine is started after it is blocked).
		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
			osyield()
		}
		// We can't trace syscall exit right now because we don't have a P.
		// Tracing code can invoke write barriers that cannot run without a P.
		// So instead we remember the syscall exit time and emit the event
		// in execute when we have a P.
		_g_.sysexitseq, _g_.sysexitticks = tracestamp()
	}

	_g_.m.locks--

	// Call the scheduler.
	mcall(exitsyscall0)

	if _g_.m.mcache == nil {
		throw("lost mcache")
	}

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	_g_.syscallsp = 0
	_g_.m.p.ptr().syscalltick++
	_g_.throwsplit = false
}
//go:nosplit
func exitsyscallfast() bool {
	_g_ := getg()

	// Freezetheworld sets stopwait but does not retake P's.
	if sched.stopwait == freezeStopWait {
		_g_.m.mcache = nil
		_g_.m.p = 0
		return false
	}

	// Try to re-acquire the last P.
	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
		// There's a cpu for us, so we can run.
		_g_.m.mcache = _g_.m.p.ptr().mcache
		_g_.m.p.ptr().m.set(_g_.m)
		if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
			if trace.enabled {
				// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
				// traceGoSysBlock for this syscall was already emitted,
				// but here we effectively retake the p from the new syscall running on the same p.
				systemstack(func() {
					// Denote blocking of the new syscall.
					traceGoSysBlock(_g_.m.p.ptr())
					// Denote completion of the current syscall.
					traceGoSysExit(tracestamp())
				})
			}
			_g_.m.p.ptr().syscalltick++
		}
		return true
	}

	// Try to get any other idle P.
	oldp := _g_.m.p.ptr()
	_g_.m.mcache = nil
	_g_.m.p = 0
	if sched.pidle != 0 {
		var ok bool
		systemstack(func() {
			ok = exitsyscallfast_pidle()
			if ok && trace.enabled {
				if oldp != nil {
					// Wait till traceGoSysBlock event is emitted.
					// This ensures consistency of the trace (the goroutine is started after it is blocked).
					for oldp.syscalltick == _g_.m.syscalltick {
						osyield()
					}
				}
				traceGoSysExit(tracestamp())
			}
		})
		if ok {
			return true
		}
	}
	return false
}

func exitsyscallfast_pidle() bool {
	lock(&sched.lock)
	_p_ := pidleget()
	if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
		atomicstore(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if _p_ != nil {
		acquirep(_p_)
		return true
	}
	return false
}

// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
func exitsyscall0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Gsyscall, _Grunnable)
	dropg()
	lock(&sched.lock)
	_p_ := pidleget()
	if _p_ == nil {
		globrunqput(gp)
	} else if atomicload(&sched.sysmonwait) != 0 {
		atomicstore(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if _p_ != nil {
		acquirep(_p_)
		execute(gp, false) // Never returns.
	}
	if _g_.m.lockedg != nil {
		// Wait until another thread schedules gp and so m again.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}

func beforefork() {
	gp := getg().m.curg

	// Fork can hang if preempted with signals frequently enough (see issue 5517).
	// Ensure that we stay on the same M where we disable profiling.
	gp.m.locks++
	if gp.m.profilehz != 0 {
		resetcpuprofiler(0)
	}

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even try to grow stack.
	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
	// runtime_AfterFork will undo this in parent process, but not in child.
	gp.stackguard0 = stackFork
}

// Called from syscall package before fork.
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
	systemstack(beforefork)
}

func afterfork() {
	gp := getg().m.curg

	// See the comment in beforefork.
	gp.stackguard0 = gp.stack.lo + _StackGuard

	hz := sched.profilehz
	if hz != 0 {
		resetcpuprofiler(hz)
	}
	gp.m.locks--
}

// Called from syscall package after fork in parent.
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
	systemstack(afterfork)
}
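// Reader annotation (not part of proc1.go): the BeforeFork/AfterFork
// hooks above fire on every subprocess spawn, because os/exec bottoms
// out in syscall.forkExec, which brackets fork(2) with them so the
// child never runs with profiling armed or a growable stack.
// Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"os/exec"
//	)
//
//	func main() {
//		out, err := exec.Command("echo", "hello").Output()
//		if err != nil {
//			fmt.Println("exec failed:", err)
//			return
//		}
//		fmt.Printf("child said: %s", out)
//	}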
// Allocate a new g, with a stack big enough for stacksize bytes.
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		stacksize = round2(_StackSystem + stacksize)
		systemstack(func() {
			newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
		})
		newg.stackguard0 = newg.stack.lo + _StackGuard
		newg.stackguard1 = ^uintptr(0)
		newg.stackAlloc = uintptr(stacksize)
	}
	return newg
}

// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred.
// This is the underlying implementation of the go keyword.
//go:nosplit
func newproc(siz int32, fn *funcval) {
	argp := add(unsafe.Pointer(&fn), ptrSize)
	pc := getcallerpc(unsafe.Pointer(&siz))
	systemstack(func() {
		newproc1(fn, (*uint8)(argp), siz, 0, pc)
	})
}

// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
	_g_ := getg()

	if fn == nil {
		_g_.m.throwing = -1 // do not dump full stacks
		throw("go of nil func value")
	}
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	siz := narg + nret
	siz = (siz + 7) &^ 7

	// We could allocate a larger initial stack if necessary.
	// Not worth it: this is almost always an error.
	// 4*sizeof(uintreg): extra space added below
	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
	if siz >= _StackMin-4*regSize-regSize {
		throw("newproc: function arguments too large for new goroutine")
	}

	_p_ := _g_.m.p.ptr()
	newg := gfget(_p_)
	if newg == nil {
		newg = malg(_StackMin)
		casgstatus(newg, _Gidle, _Gdead)
		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	totalSize := 4*regSize + uintptr(siz) // extra space in case of reads slightly beyond frame
	if hasLinkRegister {
		totalSize += ptrSize
	}
	totalSize += -totalSize & (spAlign - 1) // align to spAlign
	sp := newg.stack.hi - totalSize
	spArg := sp
	if hasLinkRegister {
		// caller's LR
		*(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
		spArg += ptrSize
	}
	memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))

	memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.gopc = callerpc
	newg.startpc = fn.fn
	casgstatus(newg, _Gdead, _Grunnable)

	if _p_.goidcache == _p_.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		_p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
		_p_.goidcache -= _GoidCacheBatch - 1
		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
	}
	newg.goid = int64(_p_.goidcache)
	_p_.goidcache++
	if raceenabled {
		newg.racectx = racegostart(callerpc)
	}
	if trace.enabled {
		traceGoCreate(newg, newg.startpc)
	}
	runqput(_p_, newg, true)

	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
	return newg
}
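// Reader annotation (not part of proc1.go): every go statement compiles
// to a newproc call; the argument bytes are copied to the new g's stack
// by newproc1 before the goroutine first runs, which is why the loop
// below can safely reuse i. Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	func worker(id int, wg *sync.WaitGroup) {
//		defer wg.Done()
//		fmt.Println("worker", id)
//	}
//
//	func main() {
//		var wg sync.WaitGroup
//		for i := 0; i < 4; i++ {
//			wg.Add(1)
//			go worker(i, &wg) // lowered to newproc(siz, fn, i, &wg)
//		}
//		wg.Wait()
//	}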
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
func gfput(_p_ *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stackAlloc

	if stksize != _FixedStack {
		// non-standard stack size - free it.
		stackfree(gp.stack, gp.stackAlloc)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		gp.stkbar = nil
		gp.stkbarPos = 0
	} else {
		// Reset stack barriers.
		gp.stkbar = gp.stkbar[:0]
		gp.stkbarPos = 0
	}

	gp.schedlink.set(_p_.gfree)
	_p_.gfree = gp
	_p_.gfreecnt++
	if _p_.gfreecnt >= 64 {
		lock(&sched.gflock)
		for _p_.gfreecnt >= 32 {
			_p_.gfreecnt--
			gp = _p_.gfree
			_p_.gfree = gp.schedlink.ptr()
			gp.schedlink.set(sched.gfree)
			sched.gfree = gp
			sched.ngfree++
		}
		unlock(&sched.gflock)
	}
}

// Get from gfree list.
// If local list is empty, grab a batch from global list.
func gfget(_p_ *p) *g {
retry:
	gp := _p_.gfree
	if gp == nil && sched.gfree != nil {
		lock(&sched.gflock)
		for _p_.gfreecnt < 32 && sched.gfree != nil {
			_p_.gfreecnt++
			gp = sched.gfree
			sched.gfree = gp.schedlink.ptr()
			sched.ngfree--
			gp.schedlink.set(_p_.gfree)
			_p_.gfree = gp
		}
		unlock(&sched.gflock)
		goto retry
	}
	if gp != nil {
		_p_.gfree = gp.schedlink.ptr()
		_p_.gfreecnt--
		if gp.stack.lo == 0 {
			// Stack was deallocated in gfput. Allocate a new one.
			systemstack(func() {
				gp.stack, gp.stkbar = stackalloc(_FixedStack)
			})
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gp.stackAlloc = _FixedStack
		} else {
			if raceenabled {
				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
			}
		}
	}
	return gp
}

// Purge all cached G's from gfree list to the global list.
func gfpurge(_p_ *p) {
	lock(&sched.gflock)
	for _p_.gfreecnt != 0 {
		_p_.gfreecnt--
		gp := _p_.gfree
		_p_.gfree = gp.schedlink.ptr()
		gp.schedlink.set(sched.gfree)
		sched.gfree = gp
		sched.ngfree++
	}
	unlock(&sched.gflock)
}

// Breakpoint executes a breakpoint trap.
func Breakpoint() {
	breakpoint()
}
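// Reader annotation (not part of proc1.go): gfput/gfget above implement
// a two-level free list: a lock-free per-P cache backed by a locked
// global list, with hysteresis (spill at 64, refill to 32) so Ps don't
// ping-pong entries through sched.gflock. A standalone sketch of the
// same balancing policy, with slices and a mutex standing in for the
// runtime's intrusive lists (hypothetical example code):
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	type item struct{ id int }
//
//	var (
//		gflock sync.Mutex
//		global []*item // plays the role of sched.gfree
//	)
//
//	type pp struct{ free []*item } // plays the role of _p_.gfree
//
//	func (p *pp) put(it *item) {
//		p.free = append(p.free, it)
//		if len(p.free) >= 64 { // too long: spill a batch globally
//			gflock.Lock()
//			for len(p.free) >= 32 {
//				n := len(p.free) - 1
//				global = append(global, p.free[n])
//				p.free = p.free[:n]
//			}
//			gflock.Unlock()
//		}
//	}
//
//	func (p *pp) get() *item {
//		if len(p.free) == 0 { // empty: refill a batch from global
//			gflock.Lock()
//			for len(p.free) < 32 && len(global) > 0 {
//				n := len(global) - 1
//				p.free = append(p.free, global[n])
//				global = global[:n]
//			}
//			gflock.Unlock()
//		}
//		if len(p.free) == 0 {
//			return nil
//		}
//		n := len(p.free) - 1
//		it := p.free[n]
//		p.free = p.free[:n]
//		return it
//	}
//
//	func main() {
//		p := &pp{}
//		for i := 0; i < 100; i++ {
//			p.put(&item{i})
//		}
//		fmt.Println("local:", len(p.free), "global:", len(global))
//	}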
// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//go:nosplit
func dolockOSThread() {
	_g_ := getg()
	_g_.m.lockedg = _g_
	_g_.lockedm = _g_.m
}

//go:nosplit

// LockOSThread wires the calling goroutine to its current operating system thread.
// Until the calling goroutine exits or calls UnlockOSThread, it will always
// execute in that thread, and no other goroutine can.
func LockOSThread() {
	getg().m.locked |= _LockExternal
	dolockOSThread()
}

//go:nosplit
func lockOSThread() {
	getg().m.locked += _LockInternal
	dolockOSThread()
}

// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
//go:nosplit
func dounlockOSThread() {
	_g_ := getg()
	if _g_.m.locked != 0 {
		return
	}
	_g_.m.lockedg = nil
	_g_.lockedm = nil
}

//go:nosplit

// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
func UnlockOSThread() {
	getg().m.locked &^= _LockExternal
	dounlockOSThread()
}

//go:nosplit
func unlockOSThread() {
	_g_ := getg()
	if _g_.m.locked < _LockInternal {
		systemstack(badunlockosthread)
	}
	_g_.m.locked -= _LockInternal
	dounlockOSThread()
}

func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}

func gcount() int32 {
	n := int32(allglen) - sched.ngfree
	for i := 0; ; i++ {
		_p_ := allp[i]
		if _p_ == nil {
			break
		}
		n -= _p_.gfreecnt
	}

	// All these variables can be changed concurrently, so the result can be inconsistent.
	// But at least the current goroutine is running.
	if n < 1 {
		n = 1
	}
	return n
}

func mcount() int32 {
	return sched.mcount
}
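// Reader annotation (not part of proc1.go): the exported pair above is
// used when a goroutine needs thread affinity, e.g. for C libraries
// with thread-local state or OS APIs that must be called from a single
// thread. Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			runtime.LockOSThread() // sets _LockExternal, wires g to m
//			defer runtime.UnlockOSThread()
//			fmt.Println("pinned to one OS thread")
//			close(done)
//		}()
//		<-done
//	}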
var prof struct {
	lock uint32
	hz   int32
}

func _System()       { _System() }
func _ExternalCode() { _ExternalCode() }
func _GC()           { _GC() }

// Called if we receive a SIGPROF signal.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz == 0 {
		return
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	mp.mallocing++

	// Coordinate with stack barrier insertion in scanstack.
	for !cas(&gp.stackLock, 0, 1) {
		osyield()
	}

	// Define that a "user g" is a user-created goroutine, and a "system g"
	// is one that is m->g0 or m->gsignal.
	//
	// We might be interrupted for profiling halfway through a
	// goroutine switch. The switch involves updating three (or four) values:
	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
	// because once it gets updated the new g is running.
	//
	// When switching from a user g to a system g, LR is not considered live,
	// so the update only affects g, SP, and PC. Since PC must be last, the
	// possible partial transitions in ordinary execution are (1) g alone is updated,
	// (2) both g and SP are updated, and (3) SP alone is updated.
	// If SP or g alone is updated, we can detect the partial transition by checking
	// whether the SP is within g's stack bounds. (We could also require that SP
	// be changed only after g, but the stack bounds check is needed by other
	// cases, so there is no need to impose an additional requirement.)
	//
	// There is one exceptional transition to a system g, not in ordinary execution.
	// When a signal arrives, the operating system starts the signal handler running
	// with an updated PC and SP. The g is updated last, at the beginning of the
	// handler. There are two reasons this is okay. First, until g is updated the
	// g and SP do not match, so the stack bounds check detects the partial transition.
	// Second, signal handlers currently run with signals disabled, so a profiling
	// signal cannot arrive during the handler.
	//
	// When switching from a system g to a user g, there are three possibilities.
	//
	// First, it may be that the g switch has no PC update, because the SP
	// either corresponds to a user g throughout (as in asmcgocall)
	// or because it has been arranged to look like a user g frame
	// (as in cgocallback_gofunc). In this case, since the entire
	// transition is a g+SP update, a partial transition updating just one of
	// those will be detected by the stack bounds check.
	//
	// Second, when returning from a signal handler, the PC and SP updates
	// are performed by the operating system in an atomic update, so the g
	// update must be done before them. The stack bounds check detects
	// the partial transition here, and (again) signal handlers run with signals
	// disabled, so a profiling signal cannot arrive then anyway.
	//
	// Third, the common case: it may be that the switch updates g, SP, and PC
	// separately. If the PC is within any of the functions that does this,
	// we don't ask for a traceback. See the function setsSP below for more about this.
	//
	// There is another apparently viable approach, recorded here in case
	// the "PC within setsSP function" check turns out not to be usable.
	// It would be possible to delay the update of either g or SP until immediately
	// before the PC update instruction. Then, because of the stack bounds check,
	// the only problematic interrupt point is just before that PC update instruction,
	// and the sigprof handler can detect that instruction and simulate stepping past
	// it in order to reach a consistent state. On ARM, the update of g must be made
	// in two places (in R10 and also in a TLS slot), so the delayed update would
	// need to be the SP update. The sigprof handler must read the instruction at
	// the current PC and if it was the known instruction (for example, JMP BX or
	// MOV R2, PC), use that other register in place of the PC value.
	// The biggest drawback to this solution is that it requires that we can tell
	// whether it's safe to read from the memory pointed at by PC.
	// In a correct program, we can test PC == nil and otherwise read,
	// but if a profiling signal happens at the instant that a program executes
	// a bad jump (before the program manages to handle the resulting fault)
	// the profiling handler could fault trying to read nonexistent memory.
	//
	// To recap, there are no constraints on the assembly being used for the
	// transition. We simply require that g and SP match and that the PC is not
	// in gogo.
	traceback := true
	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
		traceback = false
	}
	var stk [maxCPUProfStack]uintptr
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		// Cgo, we can't unwind and symbolize arbitrary C code,
		// so instead collect the Go stack that leads to the cgo call.
		// This is especially important on windows, since all syscalls are cgo calls.
		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
	} else if traceback {
		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
	}
	if !traceback || n <= 0 {
		// Normal traceback is impossible or has failed.
		// See if it falls into one of several common cases.
		n = 0
		if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
			// Libcall, i.e. runtime syscall on windows.
			// Collect Go stack that leads to the call.
			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
		}
		if n == 0 {
			// If all of the above has failed, account it against abstract "System" or "GC".
			n = 2
			// "ExternalCode" is better than "etext".
			if pc > firstmoduledata.etext {
				pc = funcPC(_ExternalCode) + _PCQuantum
			}
			stk[0] = pc
			if mp.preemptoff != "" || mp.helpgc != 0 {
				stk[1] = funcPC(_GC) + _PCQuantum
			} else {
				stk[1] = funcPC(_System) + _PCQuantum
			}
		}
	}
	atomicstore(&gp.stackLock, 0)

	if prof.hz != 0 {
		// Simple cas-lock to coordinate with setcpuprofilerate.
		for !cas(&prof.lock, 0, 1) {
			osyield()
		}
		if prof.hz != 0 {
			cpuprof.add(stk[:n])
		}
		atomicstore(&prof.lock, 0)
	}
	mp.mallocing--
}

// setsSP reports whether a function will set the SP
// to an absolute value. It is important that
// we don't traceback when these are at the bottom
// of the stack since we can't be sure that we will
// find the caller.
//
// If the function is not on the bottom of the stack
// we assume that it will have set it up so that traceback will be consistent,
// either by being a traceback terminating function
// or putting one on the stack at the right offset.
func setsSP(pc uintptr) bool {
	f := findfunc(pc)
	if f == nil {
		// couldn't find the function for this PC,
		// so assume the worst and stop traceback
		return true
	}
	switch f.entry {
	case gogoPC, systemstackPC, mcallPC, morestackPC:
		return true
	}
	return false
}

// Arrange to call fn with a traceback hz times a second.
func setcpuprofilerate_m(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption, otherwise we can be rescheduled to another thread
	// that has profiling enabled.
	_g_ := getg()
	_g_.m.locks++

	// Stop profiler on this thread so that it is safe to lock prof.
	// If a profiling signal came in while we had prof locked,
	// it would deadlock.
	resetcpuprofiler(0)

	for !cas(&prof.lock, 0, 1) {
		osyield()
	}
	prof.hz = hz
	atomicstore(&prof.lock, 0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	if hz != 0 {
		resetcpuprofiler(hz)
	}

	_g_.m.locks--
}
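// Reader annotation (not part of proc1.go): the public route into
// setcpuprofilerate_m is runtime/pprof, which arms the SIGPROF timer
// (about 100 Hz by default); each signal then lands in sigprof above.
// Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"os"
//		"runtime/pprof"
//	)
//
//	func main() {
//		f, err := os.Create("cpu.prof")
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		defer f.Close()
//		if err := pprof.StartCPUProfile(f); err != nil {
//			fmt.Println(err)
//			return
//		}
//		defer pprof.StopCPUProfile()
//
//		sum := 0
//		for i := 0; i < 1e8; i++ { // some CPU work to sample
//			sum += i
//		}
//		fmt.Println(sum)
//	}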
// Change number of processors. The world is stopped, sched is locked.
// gcworkbufs are not being modified by either the GC or
// the write barrier code.
// Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p {
	old := gomaxprocs
	if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
		throw("procresize: invalid arg")
	}
	// TODO: look into how tracing is implemented
	if trace.enabled {
		traceGomaxprocs(nprocs)
	}

	// update statistics
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// initialize new P's
	for i := int32(0); i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
			pp.id = i
			pp.status = _Pgcstop
			pp.sudogcache = pp.sudogbuf[:0]
			for i := range pp.deferpool {
				pp.deferpool[i] = pp.deferpoolbuf[i][:0]
			}
			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
		}
		if pp.mcache == nil {
			if old == 0 && i == 0 {
				if getg().m.mcache == nil {
					throw("missing mcache?")
				}
				pp.mcache = getg().m.mcache // bootstrap
			} else {
				pp.mcache = allocmcache()
			}
		}
	}

	// free unused P's
	for i := nprocs; i < old; i++ {
		p := allp[i]
		if trace.enabled {
			if p == getg().m.p.ptr() {
				// moving to p[0], pretend that we were descheduled
				// and then scheduled again to keep the trace sane.
				traceGoSched()
				traceProcStop(p)
			}
		}
		// move all runnable goroutines to the global queue
		for p.runqhead != p.runqtail {
			// pop from tail of local queue
			p.runqtail--
			gp := p.runq[p.runqtail%uint32(len(p.runq))]
			// push onto head of global queue
			globrunqputhead(gp)
		}
		if p.runnext != 0 {
			globrunqputhead(p.runnext.ptr())
			p.runnext = 0
		}
		// if there's a background worker, make it runnable and put
		// it on the global queue so it can clean itself up
		if p.gcBgMarkWorker != nil {
			casgstatus(p.gcBgMarkWorker, _Gwaiting, _Grunnable)
			if trace.enabled {
				traceGoUnpark(p.gcBgMarkWorker, 0)
			}
			globrunqput(p.gcBgMarkWorker)
			p.gcBgMarkWorker = nil
		}
		for i := range p.sudogbuf {
			p.sudogbuf[i] = nil
		}
		p.sudogcache = p.sudogbuf[:0]
		for i := range p.deferpool {
			for j := range p.deferpoolbuf[i] {
				p.deferpoolbuf[i][j] = nil
			}
			p.deferpool[i] = p.deferpoolbuf[i][:0]
		}
		freemcache(p.mcache)
		p.mcache = nil
		gfpurge(p)
		traceProcFree(p)
		p.status = _Pdead
		// can't free P itself because it can be referenced by an M in syscall
	}

	_g_ := getg()
	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
		// continue to use the current P
		_g_.m.p.ptr().status = _Prunning
	} else {
		// release the current P and acquire allp[0]
		if _g_.m.p != 0 {
			_g_.m.p.ptr().m = 0
		}
		_g_.m.p = 0
		_g_.m.mcache = nil
		p := allp[0]
		p.m = 0
		p.status = _Pidle
		acquirep(p)
		if trace.enabled {
			traceGoStart()
		}
	}
	var runnablePs *p
	for i := nprocs - 1; i >= 0; i-- {
		p := allp[i]
		if _g_.m.p.ptr() == p {
			continue
		}
		p.status = _Pidle
		if runqempty(p) {
			pidleput(p)
		} else {
			p.m.set(mget())
			p.link.set(runnablePs)
			runnablePs = p
		}
	}
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	return runnablePs
}

// Associate p and the current m.
func acquirep(_p_ *p) {
	acquirep1(_p_)

	// have p; write barriers now allowed
	_g_ := getg()
	_g_.m.mcache = _p_.mcache

	if trace.enabled {
		traceProcStart()
	}
}

// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func acquirep1(_p_ *p) {
	_g_ := getg()

	if _g_.m.p != 0 || _g_.m.mcache != nil {
		throw("acquirep: already in go")
	}
	if _p_.m != 0 || _p_.status != _Pidle {
		id := int32(0)
		if _p_.m != 0 {
			id = _p_.m.ptr().id
		}
		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
		throw("acquirep: invalid p state")
	}
	_g_.m.p.set(_p_)
	_p_.m.set(_g_.m)
	_p_.status = _Prunning
}
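// Reader annotation (not part of proc1.go): procresize is reached from
// user code via runtime.GOMAXPROCS, which stops the world, resizes
// allp, and restarts it; since Go 1.5 the default equals
// runtime.NumCPU(). Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		prev := runtime.GOMAXPROCS(2) // triggers procresize(2)
//		fmt.Println("was", prev, "now", runtime.GOMAXPROCS(0)) // 0 only queries
//	}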
// Disassociate p and the current m.
func releasep() *p {
	_g_ := getg()

	if _g_.m.p == 0 || _g_.m.mcache == nil {
		throw("releasep: invalid arg")
	}
	_p_ := _g_.m.p.ptr()
	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
		throw("releasep: invalid p state")
	}
	if trace.enabled {
		traceProcStop(_g_.m.p.ptr())
	}
	_g_.m.p = 0
	_g_.m.mcache = nil
	_p_.m = 0
	_p_.status = _Pidle
	return _p_
}

func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}

// Check for deadlock situation.
// The check is based on the number of running M's; if it is 0, we have a deadlock.
func checkdead() {
	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines. The calling program is
	// assumed to be running.
	if islibrary || isarchive {
		return
	}

	// If we are dying because of a signal caught on an already idle thread,
	// freezetheworld will cause all running threads to block.
	// And runtime will essentially enter into deadlock state,
	// except that there is a thread that will call exit soon.
	if panicking > 0 {
		return
	}

	// -1 for sysmon
	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
	if run > 0 {
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
		throw("checkdead: inconsistent counts")
	}

	grunning := 0
	lock(&allglock)
	for i := 0; i < len(allgs); i++ {
		gp := allgs[i]
		if isSystemGoroutine(gp) {
			continue
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			unlock(&allglock)
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			throw("checkdead: runnable g")
		}
	}
	unlock(&allglock)
	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
		throw("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Maybe jump time forward for playground.
	gp := timejump()
	if gp != nil {
		casgstatus(gp, _Gwaiting, _Grunnable)
		globrunqput(gp)
		_p_ := pidleget()
		if _p_ == nil {
			throw("checkdead: no p for timer")
		}
		mp := mget()
		if mp == nil {
			newm(nil, _p_)
		} else {
			mp.nextp.set(_p_)
			notewakeup(&mp.park)
		}
		return
	}

	getg().m.throwing = -1 // do not dump full stacks
	throw("all goroutines are asleep - deadlock!")
}
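// Reader annotation (not part of proc1.go): checkdead above is what
// prints the familiar crash when every goroutine is blocked.
// Hypothetical example code that trips it:
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // nothing ever sends: no runnable g remains, so checkdead
//		     // throws "all goroutines are asleep - deadlock!"
//	}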
func sysmon() {
	// If we go two minutes without a garbage collection, force one to run.
	forcegcperiod := int64(2 * 60 * 1e9)

	// If a heap span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	scavengelimit := int64(5 * 60 * 1e9)

	if debug.scavenge > 0 {
		// Scavenge-a-lot for testing.
		forcegcperiod = 10 * 1e6
		scavengelimit = 20 * 1e6
	}

	lastscavenge := nanotime()
	nscavenge := 0

	// Make wake-up period small enough for the sampling to be correct.
	maxsleep := forcegcperiod / 2
	if scavengelimit < forcegcperiod {
		maxsleep = scavengelimit / 2
	}

	lasttrace := int64(0)
	idle := 0 // how many cycles in succession we have not woken anybody up
	delay := uint32(0)
	for {
		if idle == 0 { // start with 20us sleep...
			delay = 20
		} else if idle > 50 { // start doubling the sleep after 1ms...
			delay *= 2
		}
		if delay > 10*1000 { // up to 10ms
			delay = 10 * 1000
		}
		usleep(delay)
		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
			lock(&sched.lock)
			if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
				atomicstore(&sched.sysmonwait, 1)
				unlock(&sched.lock)
				notetsleep(&sched.sysmonnote, maxsleep)
				lock(&sched.lock)
				atomicstore(&sched.sysmonwait, 0)
				noteclear(&sched.sysmonnote)
				idle = 0
				delay = 20
			}
			unlock(&sched.lock)
		}
		// poll network if not polled for more than 10ms
		lastpoll := int64(atomicload64(&sched.lastpoll))
		now := nanotime()
		unixnow := unixnanotime()
		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
			cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
			gp := netpoll(false) // non-blocking - returns list of goroutines
			if gp != nil {
				// Need to decrement number of idle locked M's
				// (pretending that one more is running) before injectglist.
				// Otherwise it can lead to the following situation:
				// injectglist grabs all P's but before it starts M's to run the P's,
				// another M returns from syscall, finishes running its G,
				// observes that there is no work to do and no other running M's
				// and reports deadlock.
				incidlelocked(-1)
				injectglist(gp)
				incidlelocked(1)
			}
		}
		// retake P's blocked in syscalls
		// and preempt long running G's
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// check if we need to force a GC
		lastgc := int64(atomicload64(&memstats.last_gc))
		if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 && atomicloaduint(&bggc.working) == 0 {
			lock(&forcegc.lock)
			forcegc.idle = 0
			forcegc.g.schedlink = 0
			injectglist(forcegc.g)
			unlock(&forcegc.lock)
		}
		// scavenge heap once in a while
		if lastscavenge+scavengelimit/2 < now {
			mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
			lastscavenge = now
			nscavenge++
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
	}
}

var pdesc [_MaxGomaxprocs]struct {
	schedtick   uint32
	schedwhen   int64
	syscalltick uint32
	syscallwhen int64
}

// forcePreemptNS is the time slice given to a G before it is
// preempted.
const forcePreemptNS = 10 * 1000 * 1000 // 10ms

func retake(now int64) uint32 {
	n := 0
	for i := int32(0); i < gomaxprocs; i++ {
		_p_ := allp[i]
		if _p_ == nil {
			continue
		}
		pd := &pdesc[i]
		s := _p_.status
		if s == _Psyscall {
			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
			t := int64(_p_.syscalltick)
			if int64(pd.syscalltick) != t {
				pd.syscalltick = uint32(t)
				pd.syscallwhen = now
				continue
			}
			// On the one hand we don't want to retake Ps if there is no other work to do,
			// but on the other hand we want to retake them eventually
			// because they can prevent the sysmon thread from deep sleep.
			if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
				continue
			}
			// Need to decrement number of idle locked M's
			// (pretending that one more is running) before the CAS.
			// Otherwise the M from which we retake can exit the syscall,
			// increment nmidle and report deadlock.
			incidlelocked(-1)
			if cas(&_p_.status, s, _Pidle) {
				if trace.enabled {
					traceGoSysBlock(_p_)
					traceProcStop(_p_)
				}
				n++
				_p_.syscalltick++
				handoffp(_p_)
			}
			incidlelocked(1)
		} else if s == _Prunning {
			// Preempt G if it's running for too long.
			t := int64(_p_.schedtick)
			if int64(pd.schedtick) != t {
				pd.schedtick = uint32(t)
				pd.schedwhen = now
				continue
			}
			if pd.schedwhen+forcePreemptNS > now {
				continue
			}
			preemptone(_p_)
		}
	}
	return uint32(n)
}

// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.
func preemptall() bool {
	res := false
	for i := int32(0); i < gomaxprocs; i++ {
		_p_ := allp[i]
		if _p_ == nil || _p_.status != _Prunning {
			continue
		}
		if preemptone(_p_) {
			res = true
		}
	}
	return res
}

// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.
// The actual preemption will happen at some point in the future
// and will be indicated by gp->status no longer being
// Grunning.
func preemptone(_p_ *p) bool {
	mp := _p_.m.ptr()
	if mp == nil || mp == getg().m {
		return false
	}
	gp := mp.curg
	if gp == nil || gp == mp.g0 {
		return false
	}

	gp.preempt = true

	// Every call in a goroutine checks for stack overflow by
	// comparing the current stack pointer to gp->stackguard0.
	// Setting gp->stackguard0 to StackPreempt folds
	// preemption into the normal stack overflow check.
	gp.stackguard0 = stackPreempt
	return true
}
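// Reader annotation (not part of proc1.go): preemptone is cooperative.
// It only sets gp.preempt and poisons stackguard0, so the request is
// honored at the next stack check, i.e. at a function call. A loop with
// no calls has no preemption point in Go 1.5, so with a single P the
// program below can hang (Go 1.14+ adds signal-based preemption and
// exits normally). Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"time"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		fmt.Println("spinning...")
//		go func() {
//			for { // no function calls: no stack check, so no preemption
//			}
//		}()
//		time.Sleep(50 * time.Millisecond)
//		fmt.Println("may never print on Go 1.5") // preemptone is ignored
//	}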
var starttime int64

func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
	}
	// We must be careful while reading data from P's, M's and G's.
	// Even if we hold schedlock, most data can be changed concurrently.
	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
	for i := int32(0); i < gomaxprocs; i++ {
		_p_ := allp[i]
		if _p_ == nil {
			continue
		}
		mp := _p_.m.ptr()
		h := atomicload(&_p_.runqhead)
		t := atomicload(&_p_.runqtail)
		if detailed {
			id := int32(-1)
			if mp != nil {
				id = mp.id
			}
			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
		} else {
			// In non-detailed mode format lengths of per-P run queues as:
			// [len1 len2 len3 len4]
			print(" ")
			if i == 0 {
				print("[")
			}
			print(t - h)
			if i == gomaxprocs-1 {
				print("]\n")
			}
		}
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	for mp := allm; mp != nil; mp = mp.alllink {
		_p_ := mp.p.ptr()
		gp := mp.curg
		lockedg := mp.lockedg
		id1 := int32(-1)
		if _p_ != nil {
			id1 = _p_.id
		}
		id2 := int64(-1)
		if gp != nil {
			id2 = gp.goid
		}
		id3 := int64(-1)
		if lockedg != nil {
			id3 = lockedg.goid
		}
		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
	}

	lock(&allglock)
	for gi := 0; gi < len(allgs); gi++ {
		gp := allgs[gi]
		mp := gp.m
		lockedm := gp.lockedm
		id1 := int32(-1)
		if mp != nil {
			id1 = mp.id
		}
		id2 := int32(-1)
		if lockedm != nil {
			id2 = lockedm.id
		}
		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
	}
	unlock(&allglock)
	unlock(&sched.lock)
}

// Put mp on midle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func mput(mp *m) {
	mp.schedlink = sched.midle
	sched.midle.set(mp)
	sched.nmidle++
	checkdead()
}

// Try to get an m from midle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func mget() *m {
	mp := sched.midle.ptr()
	if mp != nil {
		sched.midle = mp.schedlink
		sched.nmidle--
	}
	return mp
}
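// Reader annotation (not part of proc1.go): schedtrace output is
// enabled with GODEBUG rather than an API. Shape of a run (values
// illustrative only):
//
//	$ GODEBUG=schedtrace=1000 ./prog
//	SCHED 1002ms: gomaxprocs=4 idleprocs=2 threads=7 spinningthreads=0 idlethreads=3 runqueue=0 [1 0 0 0]
//
//	$ GODEBUG=schedtrace=1000,scheddetail=1 ./prog   # adds per-P/M/G lines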
// Put gp on the global runnable queue.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func globrunqput(gp *g) {
	gp.schedlink = 0
	if sched.runqtail != 0 {
		sched.runqtail.ptr().schedlink.set(gp)
	} else {
		sched.runqhead.set(gp)
	}
	sched.runqtail.set(gp)
	sched.runqsize++
}

// Put gp at the head of the global runnable queue.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func globrunqputhead(gp *g) {
	gp.schedlink = sched.runqhead
	sched.runqhead.set(gp)
	if sched.runqtail == 0 {
		sched.runqtail.set(gp)
	}
	sched.runqsize++
}

// Put a batch of runnable goroutines on the global runnable queue.
// Sched must be locked.
func globrunqputbatch(ghead *g, gtail *g, n int32) {
	gtail.schedlink = 0
	if sched.runqtail != 0 {
		sched.runqtail.ptr().schedlink.set(ghead)
	} else {
		sched.runqhead.set(ghead)
	}
	sched.runqtail.set(gtail)
	sched.runqsize += n
}

// Try to get a batch of G's from the global runnable queue.
// Sched must be locked.
func globrunqget(_p_ *p, max int32) *g {
	if sched.runqsize == 0 {
		return nil
	}

	n := sched.runqsize/gomaxprocs + 1
	if n > sched.runqsize {
		n = sched.runqsize
	}
	if max > 0 && n > max {
		n = max
	}
	if n > int32(len(_p_.runq))/2 {
		n = int32(len(_p_.runq)) / 2
	}

	sched.runqsize -= n
	if sched.runqsize == 0 {
		sched.runqtail = 0
	}

	gp := sched.runqhead.ptr()
	sched.runqhead = gp.schedlink
	n--
	for ; n > 0; n-- {
		gp1 := sched.runqhead.ptr()
		sched.runqhead = gp1.schedlink
		runqput(_p_, gp1, false)
	}
	return gp
}

// Put p on the _Pidle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func pidleput(_p_ *p) {
	if !runqempty(_p_) {
		throw("pidleput: P has non-empty run queue")
	}
	_p_.link = sched.pidle
	sched.pidle.set(_p_)
	xadd(&sched.npidle, 1) // TODO: fast atomic
}

// Try to get a p from the _Pidle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func pidleget() *p {
	_p_ := sched.pidle.ptr()
	if _p_ != nil {
		sched.pidle = _p_.link
		xadd(&sched.npidle, -1) // TODO: fast atomic
	}
	return _p_
}

// runqempty returns true if _p_ has no Gs on its local run queue.
// Note that this test is generally racy.
func runqempty(_p_ *p) bool {
	return _p_.runqhead == _p_.runqtail && _p_.runnext == 0
}

// To shake out latent assumptions about scheduling order,
// we introduce some randomness into scheduling decisions
// when running with the race detector.
// The need for this was made obvious by changing the
// (deterministic) scheduling order in Go 1.5 and breaking
// many poorly-written tests.
// With the randomness here, as long as the tests pass
// consistently with -race, they shouldn't have latent scheduling
// assumptions.
const randomizeScheduler = raceenabled
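// Reader annotation (not part of proc1.go): the fast paths of
// runqput/runqget below form a bounded SPMC ring: the owner P is the
// only producer (plain write to tail, then store-release), while any P
// may consume (CAS on head). A standalone sketch of just that protocol
// using sync/atomic, with ints in place of g pointers (hypothetical
// example code; the real queue adds runnext and the runqputslow spill):
//
//	package main
//
//	import (
//		"fmt"
//		"sync/atomic"
//	)
//
//	type ring struct {
//		head uint32 // consumed by any thread via CAS
//		tail uint32 // advanced only by the owner
//		buf  [256]int
//	}
//
//	func (r *ring) put(v int) bool { // owner-only fast path
//		h := atomic.LoadUint32(&r.head) // load-acquire
//		t := r.tail
//		if t-h >= uint32(len(r.buf)) {
//			return false // full: the runtime spills half to the global queue here
//		}
//		r.buf[t%uint32(len(r.buf))] = v
//		atomic.StoreUint32(&r.tail, t+1) // store-release: publish the slot
//		return true
//	}
//
//	func (r *ring) get() (int, bool) { // any-consumer path, as in runqget
//		for {
//			h := atomic.LoadUint32(&r.head)
//			t := atomic.LoadUint32(&r.tail)
//			if t == h {
//				return 0, false
//			}
//			v := r.buf[h%uint32(len(r.buf))]
//			if atomic.CompareAndSwapUint32(&r.head, h, h+1) { // commit consume
//				return v, true
//			}
//		}
//	}
//
//	func main() {
//		var r ring
//		for i := 1; i <= 5; i++ {
//			r.put(i)
//		}
//		for v, ok := r.get(); ok; v, ok = r.get() {
//			fmt.Print(v, " ")
//		}
//		fmt.Println()
//	}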
// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the _p_.runnext slot.
// If the run queue is full, runqput puts g on the global queue.
// Executed only by the owner P.
func runqput(_p_ *p, gp *g, next bool) {
	if randomizeScheduler && next && fastrand1()%2 == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := _p_.runnext
		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the old runnext out to the regular run queue.
		gp = oldnext.ptr()
	}

retry:
	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
	t := _p_.runqtail
	if t-h < uint32(len(_p_.runq)) {
		_p_.runq[t%uint32(len(_p_.runq))] = gp
		atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
		return
	}
	if runqputslow(_p_, gp, h, t) {
		return
	}
	// the queue is not full, now the put above must succeed
	goto retry
}

// Put g and a batch of work from local runnable queue on global queue.
// Executed only by the owner P.
func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
	var batch [len(_p_.runq)/2 + 1]*g

	// First, grab a batch from local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(_p_.runq)/2) {
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
	}
	if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		for i := uint32(1); i <= n; i++ {
			j := fastrand1() % (i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}

	// Now put the batch on global queue.
	lock(&sched.lock)
	globrunqputbatch(batch[0], batch[n], int32(n+1))
	unlock(&sched.lock)
	return true
}

// Get g from local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
func runqget(_p_ *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	for {
		next := _p_.runnext
		if next == 0 {
			break
		}
		if _p_.runnext.cas(next, 0) {
			return next.ptr(), true
		}
	}

	for {
		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
		t := _p_.runqtail
		if t == h {
			return nil, false
		}
		gp := _p_.runq[h%uint32(len(_p_.runq))]
		if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}

// Grabs a batch of goroutines from _p_'s runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
func runqgrab(_p_ *p, batch *[256]*g, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
		t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
		n := t - h
		n = n - n/2
		if n == 0 {
			if stealRunNextG {
				// Try to steal from _p_.runnext.
				if next := _p_.runnext; next != 0 {
					// Sleep to ensure that _p_ isn't about to run the g we
					// are about to steal.
					// The important use case here is when the g running on _p_
					// ready()s another g and then almost immediately blocks.
					// Instead of stealing runnext in this window, back off
					// to give _p_ a chance to schedule runnext. This will avoid
					// thrashing gs between different Ps.
					usleep(100)
					if !_p_.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next.ptr()
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}

// Steal half of the elements from the local runnable queue of p2
// and put them onto the local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
	t := _p_.runqtail
	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	n--
	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))]
	if n == 0 {
		return gp
	}
	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(_p_.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
	return gp
}

func testSchedLocalQueue() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func testSchedLocalQueueSteal() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func setMaxThreads(in int) (out int) {
	lock(&sched.lock)
	out = int(sched.maxmcount)
	sched.maxmcount = int32(in)
	checkmcount()
	unlock(&sched.lock)
	return
}

func haveexperiment(name string) bool {
	x := goexperiment
	for x != "" {
		xname := ""
		i := index(x, ",")
		if i < 0 {
			xname, x = x, ""
		} else {
			xname, x = x[:i], x[i+1:]
		}
		if xname == name {
			return true
		}
	}
	return false
}
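// Reader annotation (not part of proc1.go): setMaxThreads above is
// exposed as runtime/debug.SetMaxThreads; crossing the limit makes
// checkmcount throw "thread exhaustion". Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime/debug"
//	)
//
//	func main() {
//		prev := debug.SetMaxThreads(5000)
//		fmt.Println("previous limit:", prev)
//	}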
//go:nosplit
func procPin() int {
	_g_ := getg()
	mp := _g_.m

	mp.locks++
	return int(mp.p.ptr().id)
}

//go:nosplit
func procUnpin() {
	_g_ := getg()
	_g_.m.locks--
}

//go:linkname sync_runtime_procPin sync.runtime_procPin
//go:nosplit
func sync_runtime_procPin() int {
	return procPin()
}

//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
func sync_runtime_procUnpin() {
	procUnpin()
}

//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
//go:nosplit
func sync_atomic_runtime_procPin() int {
	return procPin()
}

//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}

// Active spinning for sync.Mutex.
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {
	// sync.Mutex is cooperative, so we are conservative with spinning.
	// Spin only a few times and only if running on a multicore machine,
	// GOMAXPROCS>1, there is at least one other running P, and the local runq is empty.
	// As opposed to runtime mutex we don't do passive spinning here,
	// because there can be work on the global runq or on other Ps.
	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		return false
	}
	return true
}

//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}
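// Reader annotation (not part of proc1.go): the sync.* linknames above
// are how the sync package borrows scheduler services: sync.Pool pins
// the goroutine to its P (sync_runtime_procPin) to pick a per-P shard
// without locking, and sync.Mutex consults canSpin/doSpin before
// sleeping. Hypothetical example code:
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	var bufs = sync.Pool{
//		New: func() interface{} { return make([]byte, 0, 1024) },
//	}
//
//	func main() {
//		b := bufs.Get().([]byte) // Get pins via runtime_procPin internally
//		b = append(b, "hello"...)
//		fmt.Println(string(b))
//		bufs.Put(b[:0])
//	}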