github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/runtime/proc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  var buildVersion = sys.TheVersion
    14  
    15  // Goroutine scheduler
    16  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    17  //
    18  // The main concepts are:
    19  // G - goroutine.
    20  // M - worker thread, or machine.
    21  // P - processor, a resource that is required to execute Go code.
    22  //     M must have an associated P to execute Go code, however it can be
    23  //     blocked or in a syscall w/o an associated P.
    24  //
    25  // Design doc at https://golang.org/s/go11sched.
    26  
    27  // Worker thread parking/unparking.
    28  // We need to balance between keeping enough running worker threads to utilize
    29  // available hardware parallelism and parking excessive running worker threads
    30  // to conserve CPU resources and power. This is not simple for two reasons:
    31  // (1) scheduler state is intentionally distributed (in particular, per-P work
    32  // queues), so it is not possible to compute global predicates on fast paths;
    33  // (2) for optimal thread management we would need to know the future (don't park
    34  // a worker thread when a new goroutine will be readied in near future).
    35  //
    36  // Three rejected approaches that would work badly:
    37  // 1. Centralize all scheduler state (would inhibit scalability).
    38  // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
    39  //    is a spare P, unpark a thread and hand it the goroutine and the P.
    40  //    This would lead to thread state thrashing, as the thread that readied the
    41  //    goroutine can be out of work the very next moment, and then we will need to park it.
    42  //    Also, it would destroy locality of computation as we want to preserve
    43  //    dependent goroutines on the same thread; and introduce additional latency.
    44  // 3. Unpark an additional thread whenever we ready a goroutine and there is an
    45  //    idle P, but don't do handoff. This would lead to excessive thread parking/
    46  //    unparking as the additional threads will instantly park without discovering
    47  //    any work to do.
    48  //
    49  // The current approach:
    50  // We unpark an additional thread when we ready a goroutine if (1) there is an
    51  // idle P and (2) there are no "spinning" worker threads. A worker thread is considered
    52  // spinning if it is out of local work and did not find work in global run queue/
    53  // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
    54  // Threads unparked this way are also considered spinning; we don't do goroutine
    55  // handoff so such threads are out of work initially. Spinning threads do some
    56  // spinning looking for work in per-P run queues before parking. If a spinning
    57  // thread finds work it takes itself out of the spinning state and proceeds to
    58  // execution. If it does not find work it takes itself out of the spinning state
    59  // and then parks.
    60  // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark
    61  // new threads when readying goroutines. To compensate for that, if the last spinning
    62  // thread finds work and stops spinning, it must unpark a new spinning thread.
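// For illustration only (not part of proc.go): a sketch of the wakeup policy just
// described, using sync/atomic and hypothetical names (workQueue, wakeWorkerSpinning).
// A new worker is unparked in the "spinning" state only when some worker is idle and
// no worker is already spinning; the last spinner that finds work wakes a successor.
//
//    var (
//        nmspinning int32 // number of spinning workers
//        npidle     int32 // number of idle (parked) workers
//    )
//
//    func readyWork(q *workQueue, w work) {
//        q.push(w) // submit first...
//        // ...then check; Go's atomic operations provide the required ordering here.
//        if atomic.LoadInt32(&npidle) != 0 && atomic.LoadInt32(&nmspinning) == 0 {
//            wakeWorkerSpinning()
//        }
//    }
//
//    func lastSpinnerFoundWork() {
//        if atomic.AddInt32(&nmspinning, -1) == 0 && atomic.LoadInt32(&npidle) > 0 {
//            wakeWorkerSpinning() // compensate so queued work is not stranded
//        }
//    }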
    63  // This approach smooths out unjustified spikes of thread unparking,
    64  // but at the same time guarantees eventual maximal CPU parallelism utilization.
    65  //
    66  // The main implementation complication is that we need to be very careful during
    67  // spinning->non-spinning thread transition. This transition can race with submission
    68  // of a new goroutine, and either one part or another needs to unpark another worker
    69  // thread. If they both fail to do that, we can end up with semi-persistent CPU
    70  // underutilization. The general pattern for goroutine readying is: submit a goroutine
    71  // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
    72  // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
    73  // #StoreLoad-style memory barrier, check all per-P work queues for new work.
    74  // Note that all this complexity does not apply to global run queue as we are not
    75  // sloppy about thread unparking when submitting to global queue. Also see comments
    76  // for nmspinning manipulation.
    77  
    78  var (
    79  	m0           m
    80  	g0           g
    81  	raceprocctx0 uintptr
    82  )
    83  
    84  //go:linkname runtime_init runtime.init
    85  func runtime_init()
    86  
    87  //go:linkname main_init main.init
    88  func main_init()
    89  
    90  // main_init_done is a signal used by cgocallbackg that initialization
    91  // has been completed. It is made before _cgo_notify_runtime_init_done,
    92  // so all cgo calls can rely on it existing. When main_init is complete,
    93  // it is closed, meaning cgocallbackg can reliably receive from it.
    94  var main_init_done chan bool
    95  
    96  //go:linkname main_main main.main
    97  func main_main()
    98  
    99  // runtimeInitTime is the nanotime() at which the runtime started.
   100  var runtimeInitTime int64
   101  
   102  // Value to use for signal mask for newly created M's.
   103  var initSigmask sigset
   104  
   105  // The main goroutine.
   106  func main() {
   107  	g := getg()
   108  
   109  	// Racectx of m0->g0 is used only as the parent of the main goroutine.
   110  	// It must not be used for anything else.
   111  	g.m.g0.racectx = 0
   112  
   113  	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
   114  	// Using decimal instead of binary GB and MB because
   115  	// they look nicer in the stack overflow failure message.
   116  	if sys.PtrSize == 8 {
   117  		maxstacksize = 1000000000
   118  	} else {
   119  		maxstacksize = 250000000
   120  	}
   121  
   122  	// Record when the world started.
   123  	runtimeInitTime = nanotime()
   124  
   125  	systemstack(func() {
   126  		newm(sysmon, nil)
   127  	})
   128  
   129  	// Lock the main goroutine onto this, the main OS thread,
   130  	// during initialization. Most programs won't care, but a few
   131  	// do require certain calls to be made by the main thread.
   132  	// Those can arrange for main.main to run in the main thread
   133  	// by calling runtime.LockOSThread during initialization
   134  	// to preserve the lock.
   135  	lockOSThread()
   136  
   137  	if g.m != &m0 {
   138  		throw("runtime.main not on m0")
   139  	}
   140  
   141  	runtime_init() // must be before defer
   142  
   143  	// Defer unlock so that runtime.Goexit during init does the unlock too.
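// The comment in runtime.main above describes how user code keeps main.main on the
// main OS thread. A minimal, standalone example of that pattern (ordinary user code,
// not part of the runtime) is:
//
//    package main
//
//    import "runtime"
//
//    func init() {
//        // Locking during package initialization preserves the lock the runtime
//        // already holds, so main.main runs on the main OS thread.
//        runtime.LockOSThread()
//    }
//
//    func main() {
//        // Calls that must be made from the main thread (for example some
//        // C GUI or OS APIs) are now safe to make here.
//    }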
144 needUnlock := true 145 defer func() { 146 if needUnlock { 147 unlockOSThread() 148 } 149 }() 150 151 gcenable() 152 153 main_init_done = make(chan bool) 154 if iscgo { 155 if _cgo_thread_start == nil { 156 throw("_cgo_thread_start missing") 157 } 158 if GOOS != "windows" { 159 if _cgo_setenv == nil { 160 throw("_cgo_setenv missing") 161 } 162 if _cgo_unsetenv == nil { 163 throw("_cgo_unsetenv missing") 164 } 165 } 166 if _cgo_notify_runtime_init_done == nil { 167 throw("_cgo_notify_runtime_init_done missing") 168 } 169 cgocall(_cgo_notify_runtime_init_done, nil) 170 } 171 172 main_init() 173 close(main_init_done) 174 175 needUnlock = false 176 unlockOSThread() 177 178 if isarchive || islibrary { 179 // A program compiled with -buildmode=c-archive or c-shared 180 // has a main, but it is not executed. 181 return 182 } 183 main_main() 184 if raceenabled { 185 racefini() 186 } 187 188 // Make racy client program work: if panicking on 189 // another goroutine at the same time as main returns, 190 // let the other goroutine finish printing the panic trace. 191 // Once it does, it will exit. See issue 3934. 192 if panicking != 0 { 193 gopark(nil, nil, "panicwait", traceEvGoStop, 1) 194 } 195 196 exit(0) 197 for { 198 var x *int32 199 *x = 0 200 } 201 } 202 203 // os_beforeExit is called from os.Exit(0). 204 //go:linkname os_beforeExit os.runtime_beforeExit 205 func os_beforeExit() { 206 if raceenabled { 207 racefini() 208 } 209 } 210 211 // start forcegc helper goroutine 212 func init() { 213 go forcegchelper() 214 } 215 216 func forcegchelper() { 217 forcegc.g = getg() 218 for { 219 lock(&forcegc.lock) 220 if forcegc.idle != 0 { 221 throw("forcegc: phase error") 222 } 223 atomic.Store(&forcegc.idle, 1) 224 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1) 225 // this goroutine is explicitly resumed by sysmon 226 if debug.gctrace > 0 { 227 println("GC forced") 228 } 229 gcStart(gcBackgroundMode, true) 230 } 231 } 232 233 //go:nosplit 234 235 // Gosched yields the processor, allowing other goroutines to run. It does not 236 // suspend the current goroutine, so execution resumes automatically. 237 func Gosched() { 238 mcall(gosched_m) 239 } 240 241 // Puts the current goroutine into a waiting state and calls unlockf. 242 // If unlockf returns false, the goroutine is resumed. 243 // unlockf must not access this G's stack, as it may be moved between 244 // the call to gopark and the call to unlockf. 245 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) { 246 mp := acquirem() 247 gp := mp.curg 248 status := readgstatus(gp) 249 if status != _Grunning && status != _Gscanrunning { 250 throw("gopark: bad g status") 251 } 252 mp.waitlock = lock 253 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf)) 254 gp.waitreason = reason 255 mp.waittraceev = traceEv 256 mp.waittraceskip = traceskip 257 releasem(mp) 258 // can't do anything that might move the G between Ms here. 259 mcall(park_m) 260 } 261 262 // Puts the current goroutine into a waiting state and unlocks the lock. 263 // The goroutine can be made runnable again by calling goready(gp). 
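// Gosched, gopark and goready above are the scheduler-side mechanics behind ordinary
// blocking operations. A small user-level example (standalone, separate from this
// file) showing yielding versus parking:
//
//    package main
//
//    import "runtime"
//
//    func main() {
//        done := make(chan struct{})
//        go func() {
//            close(done)
//        }()
//        runtime.Gosched() // yield only: this goroutine stays runnable
//        <-done            // park (gopark) until close() makes it runnable again (goready)
//    }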
264 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) { 265 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 266 } 267 268 func goready(gp *g, traceskip int) { 269 systemstack(func() { 270 ready(gp, traceskip, true) 271 }) 272 } 273 274 //go:nosplit 275 func acquireSudog() *sudog { 276 // Delicate dance: the semaphore implementation calls 277 // acquireSudog, acquireSudog calls new(sudog), 278 // new calls malloc, malloc can call the garbage collector, 279 // and the garbage collector calls the semaphore implementation 280 // in stopTheWorld. 281 // Break the cycle by doing acquirem/releasem around new(sudog). 282 // The acquirem/releasem increments m.locks during new(sudog), 283 // which keeps the garbage collector from being invoked. 284 mp := acquirem() 285 pp := mp.p.ptr() 286 if len(pp.sudogcache) == 0 { 287 lock(&sched.sudoglock) 288 // First, try to grab a batch from central cache. 289 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 290 s := sched.sudogcache 291 sched.sudogcache = s.next 292 s.next = nil 293 pp.sudogcache = append(pp.sudogcache, s) 294 } 295 unlock(&sched.sudoglock) 296 // If the central cache is empty, allocate a new one. 297 if len(pp.sudogcache) == 0 { 298 pp.sudogcache = append(pp.sudogcache, new(sudog)) 299 } 300 } 301 n := len(pp.sudogcache) 302 s := pp.sudogcache[n-1] 303 pp.sudogcache[n-1] = nil 304 pp.sudogcache = pp.sudogcache[:n-1] 305 if s.elem != nil { 306 throw("acquireSudog: found s.elem != nil in cache") 307 } 308 releasem(mp) 309 return s 310 } 311 312 //go:nosplit 313 func releaseSudog(s *sudog) { 314 if s.elem != nil { 315 throw("runtime: sudog with non-nil elem") 316 } 317 if s.selectdone != nil { 318 throw("runtime: sudog with non-nil selectdone") 319 } 320 if s.next != nil { 321 throw("runtime: sudog with non-nil next") 322 } 323 if s.prev != nil { 324 throw("runtime: sudog with non-nil prev") 325 } 326 if s.waitlink != nil { 327 throw("runtime: sudog with non-nil waitlink") 328 } 329 if s.c != nil { 330 throw("runtime: sudog with non-nil c") 331 } 332 gp := getg() 333 if gp.param != nil { 334 throw("runtime: releaseSudog with non-nil gp.param") 335 } 336 mp := acquirem() // avoid rescheduling to another P 337 pp := mp.p.ptr() 338 if len(pp.sudogcache) == cap(pp.sudogcache) { 339 // Transfer half of local cache to the central cache. 340 var first, last *sudog 341 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 342 n := len(pp.sudogcache) 343 p := pp.sudogcache[n-1] 344 pp.sudogcache[n-1] = nil 345 pp.sudogcache = pp.sudogcache[:n-1] 346 if first == nil { 347 first = p 348 } else { 349 last.next = p 350 } 351 last = p 352 } 353 lock(&sched.sudoglock) 354 last.next = sched.sudogcache 355 sched.sudogcache = first 356 unlock(&sched.sudoglock) 357 } 358 pp.sudogcache = append(pp.sudogcache, s) 359 releasem(mp) 360 } 361 362 // funcPC returns the entry PC of the function f. 363 // It assumes that f is a func value. Otherwise the behavior is undefined. 
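// acquireSudog/releaseSudog above implement a two-level free list: a per-P slice is
// the local cache and a mutex-protected linked list is the central cache, with half
// a cache moved at a time. A simplified sketch of the same technique with
// hypothetical types (not the runtime's); release does the inverse, pushing half of
// a full local cache onto the central list:
//
//    type node struct{ next *node }
//
//    type central struct {
//        mu   sync.Mutex
//        head *node
//    }
//
//    // local is owned by a single worker, so it needs no locking of its own.
//    type local struct {
//        c     *central
//        cache []*node // capacity fixed at construction
//    }
//
//    func (l *local) acquire() *node {
//        if len(l.cache) == 0 {
//            l.c.mu.Lock()
//            // Refill the local cache to half capacity from the central list.
//            for len(l.cache) < cap(l.cache)/2 && l.c.head != nil {
//                n := l.c.head
//                l.c.head, n.next = n.next, nil
//                l.cache = append(l.cache, n)
//            }
//            l.c.mu.Unlock()
//            if len(l.cache) == 0 {
//                l.cache = append(l.cache, new(node))
//            }
//        }
//        n := l.cache[len(l.cache)-1]
//        l.cache = l.cache[:len(l.cache)-1]
//        return n
//    }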
364 //go:nosplit 365 func funcPC(f interface{}) uintptr { 366 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) 367 } 368 369 // called from assembly 370 func badmcall(fn func(*g)) { 371 throw("runtime: mcall called on m->g0 stack") 372 } 373 374 func badmcall2(fn func(*g)) { 375 throw("runtime: mcall function returned") 376 } 377 378 func badreflectcall() { 379 panic(plainError("arg size to reflect.call more than 1GB")) 380 } 381 382 func lockedOSThread() bool { 383 gp := getg() 384 return gp.lockedm != nil && gp.m.lockedg != nil 385 } 386 387 var ( 388 allgs []*g 389 allglock mutex 390 ) 391 392 func allgadd(gp *g) { 393 if readgstatus(gp) == _Gidle { 394 throw("allgadd: bad status Gidle") 395 } 396 397 lock(&allglock) 398 allgs = append(allgs, gp) 399 allglen = uintptr(len(allgs)) 400 401 // Grow GC rescan list if necessary. 402 if len(allgs) > cap(work.rescan.list) { 403 lock(&work.rescan.lock) 404 l := work.rescan.list 405 // Let append do the heavy lifting, but keep the 406 // length the same. 407 work.rescan.list = append(l[:cap(l)], 0)[:len(l)] 408 unlock(&work.rescan.lock) 409 } 410 unlock(&allglock) 411 } 412 413 const ( 414 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 415 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 416 _GoidCacheBatch = 16 417 ) 418 419 // The bootstrap sequence is: 420 // 421 // call osinit 422 // call schedinit 423 // make & queue new G 424 // call runtime·mstart 425 // 426 // The new G calls runtime·main. 427 func schedinit() { 428 // raceinit must be the first call to race detector. 429 // In particular, it must be done before mallocinit below calls racemapshadow. 430 _g_ := getg() 431 if raceenabled { 432 _g_.racectx, raceprocctx0 = raceinit() 433 } 434 435 sched.maxmcount = 10000 436 437 tracebackinit() 438 moduledataverify() 439 stackinit() 440 mallocinit() 441 mcommoninit(_g_.m) 442 typelinksinit() 443 itabsinit() 444 445 msigsave(_g_.m) 446 initSigmask = _g_.m.sigmask 447 448 goargs() 449 goenvs() 450 parsedebugvars() 451 gcinit() 452 453 sched.lastpoll = uint64(nanotime()) 454 procs := int(ncpu) 455 if procs > _MaxGomaxprocs { 456 procs = _MaxGomaxprocs 457 } 458 if n := atoi(gogetenv("GOMAXPROCS")); n > 0 { 459 if n > _MaxGomaxprocs { 460 n = _MaxGomaxprocs 461 } 462 procs = n 463 } 464 if procresize(int32(procs)) != nil { 465 throw("unknown runnable goroutine during bootstrap") 466 } 467 468 if buildVersion == "" { 469 // Condition should never trigger. This code just serves 470 // to ensure runtime·buildVersion is kept in the resulting binary. 471 buildVersion = "unknown" 472 } 473 } 474 475 func dumpgstatus(gp *g) { 476 _g_ := getg() 477 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 478 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 479 } 480 481 func checkmcount() { 482 // sched lock is held 483 if sched.mcount > sched.maxmcount { 484 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 485 throw("thread exhaustion") 486 } 487 } 488 489 func mcommoninit(mp *m) { 490 _g_ := getg() 491 492 // g0 stack won't make sense for user (and is not necessary unwindable). 
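// schedinit above derives the initial number of Ps from ncpu, capped at
// _MaxGomaxprocs and overridden by the GOMAXPROCS environment variable. From user
// code the same setting is visible through the public API (standalone example, not
// part of this file):
//
//    package main
//
//    import (
//        "fmt"
//        "runtime"
//    )
//
//    func main() {
//        // GOMAXPROCS(0) queries the current setting without changing it.
//        fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0), "NumCPU:", runtime.NumCPU())
//    }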
493 if _g_ != _g_.m.g0 { 494 callers(1, mp.createstack[:]) 495 } 496 497 mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks()) 498 if mp.fastrand == 0 { 499 mp.fastrand = 0x49f6428a 500 } 501 502 lock(&sched.lock) 503 mp.id = sched.mcount 504 sched.mcount++ 505 checkmcount() 506 mpreinit(mp) 507 if mp.gsignal != nil { 508 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 509 } 510 511 // Add to allm so garbage collector doesn't free g->m 512 // when it is just in a register or thread-local storage. 513 mp.alllink = allm 514 515 // NumCgoCall() iterates over allm w/o schedlock, 516 // so we need to publish it safely. 517 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 518 unlock(&sched.lock) 519 520 // Allocate memory to hold a cgo traceback if the cgo call crashes. 521 if iscgo || GOOS == "solaris" || GOOS == "windows" { 522 mp.cgoCallers = new(cgoCallers) 523 } 524 } 525 526 // Mark gp ready to run. 527 func ready(gp *g, traceskip int, next bool) { 528 if trace.enabled { 529 traceGoUnpark(gp, traceskip) 530 } 531 532 status := readgstatus(gp) 533 534 // Mark runnable. 535 _g_ := getg() 536 _g_.m.locks++ // disable preemption because it can be holding p in a local var 537 if status&^_Gscan != _Gwaiting { 538 dumpgstatus(gp) 539 throw("bad g->status in ready") 540 } 541 542 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 543 casgstatus(gp, _Gwaiting, _Grunnable) 544 runqput(_g_.m.p.ptr(), gp, next) 545 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic 546 wakep() 547 } 548 _g_.m.locks-- 549 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in Case we've cleared it in newstack 550 _g_.stackguard0 = stackPreempt 551 } 552 } 553 554 func gcprocs() int32 { 555 // Figure out how many CPUs to use during GC. 556 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 557 lock(&sched.lock) 558 n := gomaxprocs 559 if n > ncpu { 560 n = ncpu 561 } 562 if n > _MaxGcproc { 563 n = _MaxGcproc 564 } 565 if n > sched.nmidle+1 { // one M is currently running 566 n = sched.nmidle + 1 567 } 568 unlock(&sched.lock) 569 return n 570 } 571 572 func needaddgcproc() bool { 573 lock(&sched.lock) 574 n := gomaxprocs 575 if n > ncpu { 576 n = ncpu 577 } 578 if n > _MaxGcproc { 579 n = _MaxGcproc 580 } 581 n -= sched.nmidle + 1 // one M is currently running 582 unlock(&sched.lock) 583 return n > 0 584 } 585 586 func helpgc(nproc int32) { 587 _g_ := getg() 588 lock(&sched.lock) 589 pos := 0 590 for n := int32(1); n < nproc; n++ { // one M is currently running 591 if allp[pos].mcache == _g_.m.mcache { 592 pos++ 593 } 594 mp := mget() 595 if mp == nil { 596 throw("gcprocs inconsistency") 597 } 598 mp.helpgc = n 599 mp.p.set(allp[pos]) 600 mp.mcache = allp[pos].mcache 601 pos++ 602 notewakeup(&mp.park) 603 } 604 unlock(&sched.lock) 605 } 606 607 // freezeStopWait is a large value that freezetheworld sets 608 // sched.stopwait to in order to request that all Gs permanently stop. 609 const freezeStopWait = 0x7fffffff 610 611 // Similar to stopTheWorld but best-effort and can be called several times. 612 // There is no reverse operation, used during crashing. 613 // This function must not lock any mutexes. 
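// gcprocs and needaddgcproc above compute the GC helper count as the minimum of
// gomaxprocs, ncpu, _MaxGcproc and the number of available Ms. The same clamping,
// written out as a plain helper for illustration (hypothetical name and parameters):
//
//    func gcHelperCount(gomaxprocs, ncpu, maxGcproc, idleMs int32) int32 {
//        n := gomaxprocs
//        if n > ncpu {
//            n = ncpu
//        }
//        if n > maxGcproc {
//            n = maxGcproc
//        }
//        if n > idleMs+1 { // one M is currently running
//            n = idleMs + 1
//        }
//        return n
//    }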
614 func freezetheworld() { 615 // stopwait and preemption requests can be lost 616 // due to races with concurrently executing threads, 617 // so try several times 618 for i := 0; i < 5; i++ { 619 // this should tell the scheduler to not start any new goroutines 620 sched.stopwait = freezeStopWait 621 atomic.Store(&sched.gcwaiting, 1) 622 // this should stop running goroutines 623 if !preemptall() { 624 break // no running goroutines 625 } 626 usleep(1000) 627 } 628 // to be sure 629 usleep(1000) 630 preemptall() 631 usleep(1000) 632 } 633 634 func isscanstatus(status uint32) bool { 635 if status == _Gscan { 636 throw("isscanstatus: Bad status Gscan") 637 } 638 return status&_Gscan == _Gscan 639 } 640 641 // All reads and writes of g's status go through readgstatus, casgstatus 642 // castogscanstatus, casfrom_Gscanstatus. 643 //go:nosplit 644 func readgstatus(gp *g) uint32 { 645 return atomic.Load(&gp.atomicstatus) 646 } 647 648 // Ownership of gcscanvalid: 649 // 650 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan), 651 // then gp owns gp.gcscanvalid, and other goroutines must not modify it. 652 // 653 // Otherwise, a second goroutine can lock the scan state by setting _Gscan 654 // in the status bit and then modify gcscanvalid, and then unlock the scan state. 655 // 656 // Note that the first condition implies an exception to the second: 657 // if a second goroutine changes gp's status to _Grunning|_Gscan, 658 // that second goroutine still does not have the right to modify gcscanvalid. 659 660 // The Gscanstatuses are acting like locks and this releases them. 661 // If it proves to be a performance hit we should be able to make these 662 // simple atomic stores but for now we are going to throw if 663 // we see an inconsistent state. 664 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 665 success := false 666 667 // Check that transition is valid. 668 switch oldval { 669 default: 670 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 671 dumpgstatus(gp) 672 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 673 case _Gscanrunnable, 674 _Gscanwaiting, 675 _Gscanrunning, 676 _Gscansyscall: 677 if newval == oldval&^_Gscan { 678 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 679 } 680 } 681 if !success { 682 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 683 dumpgstatus(gp) 684 throw("casfrom_Gscanstatus: gp->status is not in scan state") 685 } 686 } 687 688 // This will return false if the gp is not in the expected status and the cas fails. 689 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 690 func castogscanstatus(gp *g, oldval, newval uint32) bool { 691 switch oldval { 692 case _Grunnable, 693 _Grunning, 694 _Gwaiting, 695 _Gsyscall: 696 if newval == oldval|_Gscan { 697 return atomic.Cas(&gp.atomicstatus, oldval, newval) 698 } 699 } 700 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 701 throw("castogscanstatus") 702 panic("not reached") 703 } 704 705 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 706 // and casfrom_Gscanstatus instead. 707 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 708 // put it in the Gscan state is finished. 
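// castogscanstatus/casfrom_Gscanstatus above use the _Gscan bit in the status word
// as a lock: setting it with a CAS acquires the "scan lock" and clearing it releases.
// A minimal sketch of that technique with made-up constants (sync/atomic assumed):
//
//    const scanBit uint32 = 0x1000
//
//    // tryLockScan claims a goroutine whose status is currently old.
//    func tryLockScan(status *uint32, old uint32) bool {
//        return atomic.CompareAndSwapUint32(status, old, old|scanBit)
//    }
//
//    // unlockScan restores the unscanned status; only the owner that set the
//    // bit may call it.
//    func unlockScan(status *uint32, old uint32) {
//        if !atomic.CompareAndSwapUint32(status, old|scanBit, old) {
//            panic("scan bit not held or status changed")
//        }
//    }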
709 //go:nosplit 710 func casgstatus(gp *g, oldval, newval uint32) { 711 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 712 systemstack(func() { 713 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 714 throw("casgstatus: bad incoming values") 715 }) 716 } 717 718 if oldval == _Grunning && gp.gcscanvalid { 719 // If oldvall == _Grunning, then the actual status must be 720 // _Grunning or _Grunning|_Gscan; either way, 721 // we own gp.gcscanvalid, so it's safe to read. 722 // gp.gcscanvalid must not be true when we are running. 723 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n") 724 throw("casgstatus") 725 } 726 727 // See http://golang.org/cl/21503 for justification of the yield delay. 728 const yieldDelay = 5 * 1000 729 var nextYield int64 730 731 // loop if gp->atomicstatus is in a scan state giving 732 // GC time to finish and change the state to oldval. 733 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { 734 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 735 systemstack(func() { 736 throw("casgstatus: waiting for Gwaiting but is Grunnable") 737 }) 738 } 739 // Help GC if needed. 740 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { 741 // gp.preemptscan = false 742 // systemstack(func() { 743 // gcphasework(gp) 744 // }) 745 // } 746 // But meanwhile just yield. 747 if i == 0 { 748 nextYield = nanotime() + yieldDelay 749 } 750 if nanotime() < nextYield { 751 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { 752 procyield(1) 753 } 754 } else { 755 osyield() 756 nextYield = nanotime() + yieldDelay/2 757 } 758 } 759 if newval == _Grunning && gp.gcscanvalid { 760 // Run queueRescan on the system stack so it has more space. 761 systemstack(func() { queueRescan(gp) }) 762 } 763 } 764 765 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 766 // Returns old status. Cannot call casgstatus directly, because we are racing with an 767 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 768 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 769 // it would loop waiting for the status to go back to Gwaiting, which it never will. 770 //go:nosplit 771 func casgcopystack(gp *g) uint32 { 772 for { 773 oldstatus := readgstatus(gp) &^ _Gscan 774 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 775 throw("copystack: bad status, not Gwaiting or Grunnable") 776 } 777 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 778 return oldstatus 779 } 780 } 781 } 782 783 // scang blocks until gp's stack has been scanned. 784 // It might be scanned by scang or it might be scanned by the goroutine itself. 785 // Either way, the stack scan has completed when scang returns. 786 func scang(gp *g, gcw *gcWork) { 787 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone. 788 // Nothing is racing with us now, but gcscandone might be set to true left over 789 // from an earlier round of stack scanning (we scan twice per GC). 790 // We use gcscandone to record whether the scan has been done during this round. 791 // It is important that the scan happens exactly once: if called twice, 792 // the installation of stack barriers will detect the double scan and die. 793 794 gp.gcscandone = false 795 796 // See http://golang.org/cl/21503 for justification of the yield delay. 
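// The retry loop in casgstatus above spins briefly with procyield and falls back to
// osyield once a deadline passes. The same shape using only the standard library
// (illustrative, not the runtime's code; procyield/osyield are runtime-internal):
//
//    func casWithBackoff(addr *uint32, old, new uint32) {
//        const yieldDelay = 5 * time.Microsecond
//        var nextYield time.Time
//        for i := 0; !atomic.CompareAndSwapUint32(addr, old, new); i++ {
//            if i == 0 {
//                nextYield = time.Now().Add(yieldDelay)
//            }
//            if time.Now().Before(nextYield) {
//                // Short busy wait; the runtime uses procyield(1) here.
//                for x := 0; x < 10 && atomic.LoadUint32(addr) != old; x++ {
//                }
//            } else {
//                runtime.Gosched() // the runtime uses osyield()
//                nextYield = time.Now().Add(yieldDelay / 2)
//            }
//        }
//    }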
797 const yieldDelay = 10 * 1000 798 var nextYield int64 799 800 // Endeavor to get gcscandone set to true, 801 // either by doing the stack scan ourselves or by coercing gp to scan itself. 802 // gp.gcscandone can transition from false to true when we're not looking 803 // (if we asked for preemption), so any time we lock the status using 804 // castogscanstatus we have to double-check that the scan is still not done. 805 loop: 806 for i := 0; !gp.gcscandone; i++ { 807 switch s := readgstatus(gp); s { 808 default: 809 dumpgstatus(gp) 810 throw("stopg: invalid status") 811 812 case _Gdead: 813 // No stack. 814 gp.gcscandone = true 815 break loop 816 817 case _Gcopystack: 818 // Stack being switched. Go around again. 819 820 case _Grunnable, _Gsyscall, _Gwaiting: 821 // Claim goroutine by setting scan bit. 822 // Racing with execution or readying of gp. 823 // The scan bit keeps them from running 824 // the goroutine until we're done. 825 if castogscanstatus(gp, s, s|_Gscan) { 826 if !gp.gcscandone { 827 scanstack(gp, gcw) 828 gp.gcscandone = true 829 } 830 restartg(gp) 831 break loop 832 } 833 834 case _Gscanwaiting: 835 // newstack is doing a scan for us right now. Wait. 836 837 case _Grunning: 838 // Goroutine running. Try to preempt execution so it can scan itself. 839 // The preemption handler (in newstack) does the actual scan. 840 841 // Optimization: if there is already a pending preemption request 842 // (from the previous loop iteration), don't bother with the atomics. 843 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt { 844 break 845 } 846 847 // Ask for preemption and self scan. 848 if castogscanstatus(gp, _Grunning, _Gscanrunning) { 849 if !gp.gcscandone { 850 gp.preemptscan = true 851 gp.preempt = true 852 gp.stackguard0 = stackPreempt 853 } 854 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning) 855 } 856 } 857 858 if i == 0 { 859 nextYield = nanotime() + yieldDelay 860 } 861 if nanotime() < nextYield { 862 procyield(10) 863 } else { 864 osyield() 865 nextYield = nanotime() + yieldDelay/2 866 } 867 } 868 869 gp.preemptscan = false // cancel scan request if no longer needed 870 } 871 872 // The GC requests that this routine be moved from a scanmumble state to a mumble state. 873 func restartg(gp *g) { 874 s := readgstatus(gp) 875 switch s { 876 default: 877 dumpgstatus(gp) 878 throw("restartg: unexpected status") 879 880 case _Gdead: 881 // ok 882 883 case _Gscanrunnable, 884 _Gscanwaiting, 885 _Gscansyscall: 886 casfrom_Gscanstatus(gp, s, s&^_Gscan) 887 } 888 } 889 890 // stopTheWorld stops all P's from executing goroutines, interrupting 891 // all goroutines at GC safe points and records reason as the reason 892 // for the stop. On return, only the current goroutine's P is running. 893 // stopTheWorld must not be called from a system stack and the caller 894 // must not hold worldsema. The caller must call startTheWorld when 895 // other P's should resume execution. 896 // 897 // stopTheWorld is safe for multiple goroutines to call at the 898 // same time. Each will execute its own stop, and the stops will 899 // be serialized. 900 // 901 // This is also used by routines that do stack dumps. If the system is 902 // in panic or being exited, this may not reliably stop all 903 // goroutines. 904 func stopTheWorld(reason string) { 905 semacquire(&worldsema, false) 906 getg().m.preemptoff = reason 907 systemstack(stopTheWorldWithSema) 908 } 909 910 // startTheWorld undoes the effects of stopTheWorld. 
911 func startTheWorld() { 912 systemstack(startTheWorldWithSema) 913 // worldsema must be held over startTheWorldWithSema to ensure 914 // gomaxprocs cannot change while worldsema is held. 915 semrelease(&worldsema) 916 getg().m.preemptoff = "" 917 } 918 919 // Holding worldsema grants an M the right to try to stop the world 920 // and prevents gomaxprocs from changing concurrently. 921 var worldsema uint32 = 1 922 923 // stopTheWorldWithSema is the core implementation of stopTheWorld. 924 // The caller is responsible for acquiring worldsema and disabling 925 // preemption first and then should stopTheWorldWithSema on the system 926 // stack: 927 // 928 // semacquire(&worldsema, false) 929 // m.preemptoff = "reason" 930 // systemstack(stopTheWorldWithSema) 931 // 932 // When finished, the caller must either call startTheWorld or undo 933 // these three operations separately: 934 // 935 // m.preemptoff = "" 936 // systemstack(startTheWorldWithSema) 937 // semrelease(&worldsema) 938 // 939 // It is allowed to acquire worldsema once and then execute multiple 940 // startTheWorldWithSema/stopTheWorldWithSema pairs. 941 // Other P's are able to execute between successive calls to 942 // startTheWorldWithSema and stopTheWorldWithSema. 943 // Holding worldsema causes any other goroutines invoking 944 // stopTheWorld to block. 945 func stopTheWorldWithSema() { 946 _g_ := getg() 947 948 // If we hold a lock, then we won't be able to stop another M 949 // that is blocked trying to acquire the lock. 950 if _g_.m.locks > 0 { 951 throw("stopTheWorld: holding locks") 952 } 953 954 lock(&sched.lock) 955 sched.stopwait = gomaxprocs 956 atomic.Store(&sched.gcwaiting, 1) 957 preemptall() 958 // stop current P 959 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 
960 sched.stopwait-- 961 // try to retake all P's in Psyscall status 962 for i := 0; i < int(gomaxprocs); i++ { 963 p := allp[i] 964 s := p.status 965 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 966 if trace.enabled { 967 traceGoSysBlock(p) 968 traceProcStop(p) 969 } 970 p.syscalltick++ 971 sched.stopwait-- 972 } 973 } 974 // stop idle P's 975 for { 976 p := pidleget() 977 if p == nil { 978 break 979 } 980 p.status = _Pgcstop 981 sched.stopwait-- 982 } 983 wait := sched.stopwait > 0 984 unlock(&sched.lock) 985 986 // wait for remaining P's to stop voluntarily 987 if wait { 988 for { 989 // wait for 100us, then try to re-preempt in case of any races 990 if notetsleep(&sched.stopnote, 100*1000) { 991 noteclear(&sched.stopnote) 992 break 993 } 994 preemptall() 995 } 996 } 997 if sched.stopwait != 0 { 998 throw("stopTheWorld: not stopped") 999 } 1000 for i := 0; i < int(gomaxprocs); i++ { 1001 p := allp[i] 1002 if p.status != _Pgcstop { 1003 throw("stopTheWorld: not stopped") 1004 } 1005 } 1006 } 1007 1008 func mhelpgc() { 1009 _g_ := getg() 1010 _g_.m.helpgc = -1 1011 } 1012 1013 func startTheWorldWithSema() { 1014 _g_ := getg() 1015 1016 _g_.m.locks++ // disable preemption because it can be holding p in a local var 1017 gp := netpoll(false) // non-blocking 1018 injectglist(gp) 1019 add := needaddgcproc() 1020 lock(&sched.lock) 1021 1022 procs := gomaxprocs 1023 if newprocs != 0 { 1024 procs = newprocs 1025 newprocs = 0 1026 } 1027 p1 := procresize(procs) 1028 sched.gcwaiting = 0 1029 if sched.sysmonwait != 0 { 1030 sched.sysmonwait = 0 1031 notewakeup(&sched.sysmonnote) 1032 } 1033 unlock(&sched.lock) 1034 1035 for p1 != nil { 1036 p := p1 1037 p1 = p1.link.ptr() 1038 if p.m != 0 { 1039 mp := p.m.ptr() 1040 p.m = 0 1041 if mp.nextp != 0 { 1042 throw("startTheWorld: inconsistent mp->nextp") 1043 } 1044 mp.nextp.set(p) 1045 notewakeup(&mp.park) 1046 } else { 1047 // Start M to run P. Do not start another M below. 1048 newm(nil, p) 1049 add = false 1050 } 1051 } 1052 1053 // Wakeup an additional proc in case we have excessive runnable goroutines 1054 // in local queues or in the global queue. If we don't, the proc will park itself. 1055 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1056 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 1057 wakep() 1058 } 1059 1060 if add { 1061 // If GC could have used another helper proc, start one now, 1062 // in the hope that it will be available next time. 1063 // It would have been even better to start it before the collection, 1064 // but doing so requires allocating memory, so it's tricky to 1065 // coordinate. This lazy approach works out in practice: 1066 // we don't mind if the first couple gc rounds don't have quite 1067 // the maximum number of procs. 1068 newm(mhelpgc, nil) 1069 } 1070 _g_.m.locks-- 1071 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1072 _g_.stackguard0 = stackPreempt 1073 } 1074 } 1075 1076 // Called to start an M. 1077 //go:nosplit 1078 func mstart() { 1079 _g_ := getg() 1080 1081 if _g_.stack.lo == 0 { 1082 // Initialize stack bounds from system stack. 1083 // Cgo may have left stack size in stack.hi. 
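// stopTheWorldWithSema above counts stopped Ps down in sched.stopwait, and the last
// P to stop wakes sched.stopnote. The shape of that handshake, reduced to a counter
// and a channel (hypothetical names, not the runtime's note primitives):
//
//    type stopper struct {
//        stopwait int32
//        stopnote chan struct{}
//    }
//
//    func (s *stopper) begin(nprocs int32) {
//        atomic.StoreInt32(&s.stopwait, nprocs)
//        s.stopnote = make(chan struct{})
//    }
//
//    // pStopped is run by each worker as it reaches a safe point.
//    func (s *stopper) pStopped() {
//        if atomic.AddInt32(&s.stopwait, -1) == 0 {
//            close(s.stopnote) // like notewakeup(&sched.stopnote)
//        }
//    }
//
//    // wait blocks the initiator until every worker has stopped.
//    func (s *stopper) wait() {
//        <-s.stopnote // like notetsleep(&sched.stopnote, ...)
//    }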
1084 size := _g_.stack.hi 1085 if size == 0 { 1086 size = 8192 * sys.StackGuardMultiplier 1087 } 1088 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1089 _g_.stack.lo = _g_.stack.hi - size + 1024 1090 } 1091 // Initialize stack guards so that we can start calling 1092 // both Go and C functions with stack growth prologues. 1093 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1094 _g_.stackguard1 = _g_.stackguard0 1095 mstart1() 1096 } 1097 1098 func mstart1() { 1099 _g_ := getg() 1100 1101 if _g_ != _g_.m.g0 { 1102 throw("bad runtime·mstart") 1103 } 1104 1105 // Record top of stack for use by mcall. 1106 // Once we call schedule we're never coming back, 1107 // so other calls can reuse this stack space. 1108 gosave(&_g_.m.g0.sched) 1109 _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used 1110 asminit() 1111 minit() 1112 1113 // Install signal handlers; after minit so that minit can 1114 // prepare the thread to be able to handle the signals. 1115 if _g_.m == &m0 { 1116 // Create an extra M for callbacks on threads not created by Go. 1117 if iscgo && !cgoHasExtraM { 1118 cgoHasExtraM = true 1119 newextram() 1120 } 1121 initsig(false) 1122 } 1123 1124 if fn := _g_.m.mstartfn; fn != nil { 1125 fn() 1126 } 1127 1128 if _g_.m.helpgc != 0 { 1129 _g_.m.helpgc = 0 1130 stopm() 1131 } else if _g_.m != &m0 { 1132 acquirep(_g_.m.nextp.ptr()) 1133 _g_.m.nextp = 0 1134 } 1135 schedule() 1136 } 1137 1138 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1139 // If a P is currently executing code, this will bring the P to a GC 1140 // safe point and execute fn on that P. If the P is not executing code 1141 // (it is idle or in a syscall), this will call fn(p) directly while 1142 // preventing the P from exiting its state. This does not ensure that 1143 // fn will run on every CPU executing Go code, but it acts as a global 1144 // memory barrier. GC uses this as a "ragged barrier." 1145 // 1146 // The caller must hold worldsema. 1147 // 1148 //go:systemstack 1149 func forEachP(fn func(*p)) { 1150 mp := acquirem() 1151 _p_ := getg().m.p.ptr() 1152 1153 lock(&sched.lock) 1154 if sched.safePointWait != 0 { 1155 throw("forEachP: sched.safePointWait != 0") 1156 } 1157 sched.safePointWait = gomaxprocs - 1 1158 sched.safePointFn = fn 1159 1160 // Ask all Ps to run the safe point function. 1161 for _, p := range allp[:gomaxprocs] { 1162 if p != _p_ { 1163 atomic.Store(&p.runSafePointFn, 1) 1164 } 1165 } 1166 preemptall() 1167 1168 // Any P entering _Pidle or _Psyscall from now on will observe 1169 // p.runSafePointFn == 1 and will call runSafePointFn when 1170 // changing its status to _Pidle/_Psyscall. 1171 1172 // Run safe point function for all idle Ps. sched.pidle will 1173 // not change because we hold sched.lock. 1174 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1175 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1176 fn(p) 1177 sched.safePointWait-- 1178 } 1179 } 1180 1181 wait := sched.safePointWait > 0 1182 unlock(&sched.lock) 1183 1184 // Run fn for the current P. 1185 fn(_p_) 1186 1187 // Force Ps currently in _Psyscall into _Pidle and hand them 1188 // off to induce safe point function execution. 1189 for i := 0; i < int(gomaxprocs); i++ { 1190 p := allp[i] 1191 s := p.status 1192 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1193 if trace.enabled { 1194 traceGoSysBlock(p) 1195 traceProcStop(p) 1196 } 1197 p.syscalltick++ 1198 handoffp(p) 1199 } 1200 } 1201 1202 // Wait for remaining Ps to run fn. 
1203 if wait { 1204 for { 1205 // Wait for 100us, then try to re-preempt in 1206 // case of any races. 1207 // 1208 // Requires system stack. 1209 if notetsleep(&sched.safePointNote, 100*1000) { 1210 noteclear(&sched.safePointNote) 1211 break 1212 } 1213 preemptall() 1214 } 1215 } 1216 if sched.safePointWait != 0 { 1217 throw("forEachP: not done") 1218 } 1219 for i := 0; i < int(gomaxprocs); i++ { 1220 p := allp[i] 1221 if p.runSafePointFn != 0 { 1222 throw("forEachP: P did not run fn") 1223 } 1224 } 1225 1226 lock(&sched.lock) 1227 sched.safePointFn = nil 1228 unlock(&sched.lock) 1229 releasem(mp) 1230 } 1231 1232 // runSafePointFn runs the safe point function, if any, for this P. 1233 // This should be called like 1234 // 1235 // if getg().m.p.runSafePointFn != 0 { 1236 // runSafePointFn() 1237 // } 1238 // 1239 // runSafePointFn must be checked on any transition in to _Pidle or 1240 // _Psyscall to avoid a race where forEachP sees that the P is running 1241 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1242 // nor the P run the safe-point function. 1243 func runSafePointFn() { 1244 p := getg().m.p.ptr() 1245 // Resolve the race between forEachP running the safe-point 1246 // function on this P's behalf and this P running the 1247 // safe-point function directly. 1248 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1249 return 1250 } 1251 sched.safePointFn(p) 1252 lock(&sched.lock) 1253 sched.safePointWait-- 1254 if sched.safePointWait == 0 { 1255 notewakeup(&sched.safePointNote) 1256 } 1257 unlock(&sched.lock) 1258 } 1259 1260 // When running with cgo, we call _cgo_thread_start 1261 // to start threads for us so that we can play nicely with 1262 // foreign code. 1263 var cgoThreadStart unsafe.Pointer 1264 1265 type cgothreadstart struct { 1266 g guintptr 1267 tls *uint64 1268 fn unsafe.Pointer 1269 } 1270 1271 // Allocate a new m unassociated with any thread. 1272 // Can use p for allocation context if needed. 1273 // fn is recorded as the new m's m.mstartfn. 1274 // 1275 // This function it known to the compiler to inhibit the 1276 // go:nowritebarrierrec annotation because it uses P for allocation. 1277 func allocm(_p_ *p, fn func()) *m { 1278 _g_ := getg() 1279 _g_.m.locks++ // disable GC because it can be called from sysmon 1280 if _g_.m.p == 0 { 1281 acquirep(_p_) // temporarily borrow p for mallocs in this function 1282 } 1283 mp := new(m) 1284 mp.mstartfn = fn 1285 mcommoninit(mp) 1286 1287 // In case of cgo or Solaris, pthread_create will make us a stack. 1288 // Windows and Plan 9 will layout sched stack on OS stack. 1289 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" { 1290 mp.g0 = malg(-1) 1291 } else { 1292 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1293 } 1294 mp.g0.m = mp 1295 1296 if _p_ == _g_.m.p.ptr() { 1297 releasep() 1298 } 1299 _g_.m.locks-- 1300 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1301 _g_.stackguard0 = stackPreempt 1302 } 1303 1304 return mp 1305 } 1306 1307 // needm is called when a cgo callback happens on a 1308 // thread without an m (a thread not created by Go). 1309 // In this case, needm is expected to find an m to use 1310 // and return with m, g initialized correctly. 1311 // Since m and g are not set now (likely nil, but see below) 1312 // needm is limited in what routines it can call. In particular 1313 // it can only call nosplit functions (textflag 7) and cannot 1314 // do any scheduling that requires an m. 
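// runSafePointFn above resolves the race between forEachP running the safe-point
// function on a P's behalf and the P running it itself: whoever wins the CAS on
// p.runSafePointFn performs the call, so it runs exactly once per P. The same idea
// in isolation (illustrative names, sync/atomic assumed):
//
//    // claimAndRun reports whether this caller won the right to run fn.
//    func claimAndRun(flag *uint32, fn func()) bool {
//        if !atomic.CompareAndSwapUint32(flag, 1, 0) {
//            return false // someone else already ran it, or it was never requested
//        }
//        fn()
//        return true
//    }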
1315 // 1316 // In order to avoid needing heavy lifting here, we adopt 1317 // the following strategy: there is a stack of available m's 1318 // that can be stolen. Using compare-and-swap 1319 // to pop from the stack has ABA races, so we simulate 1320 // a lock by doing an exchange (via casp) to steal the stack 1321 // head and replace the top pointer with MLOCKED (1). 1322 // This serves as a simple spin lock that we can use even 1323 // without an m. The thread that locks the stack in this way 1324 // unlocks the stack by storing a valid stack head pointer. 1325 // 1326 // In order to make sure that there is always an m structure 1327 // available to be stolen, we maintain the invariant that there 1328 // is always one more than needed. At the beginning of the 1329 // program (if cgo is in use) the list is seeded with a single m. 1330 // If needm finds that it has taken the last m off the list, its job 1331 // is - once it has installed its own m so that it can do things like 1332 // allocate memory - to create a spare m and put it on the list. 1333 // 1334 // Each of these extra m's also has a g0 and a curg that are 1335 // pressed into service as the scheduling stack and current 1336 // goroutine for the duration of the cgo callback. 1337 // 1338 // When the callback is done with the m, it calls dropm to 1339 // put the m back on the list. 1340 //go:nosplit 1341 func needm(x byte) { 1342 if iscgo && !cgoHasExtraM { 1343 // Can happen if C/C++ code calls Go from a global ctor. 1344 // Can not throw, because scheduler is not initialized yet. 1345 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1346 exit(1) 1347 } 1348 1349 // Lock extra list, take head, unlock popped list. 1350 // nilokay=false is safe here because of the invariant above, 1351 // that the extra list always contains or will soon contain 1352 // at least one m. 1353 mp := lockextra(false) 1354 1355 // Set needextram when we've just emptied the list, 1356 // so that the eventual call into cgocallbackg will 1357 // allocate a new m for the extra list. We delay the 1358 // allocation until then so that it can be done 1359 // after exitsyscall makes sure it is okay to be 1360 // running at all (that is, there's no garbage collection 1361 // running right now). 1362 mp.needextram = mp.schedlink == 0 1363 unlockextra(mp.schedlink.ptr()) 1364 1365 // Save and block signals before installing g. 1366 // Once g is installed, any incoming signals will try to execute, 1367 // but we won't have the sigaltstack settings and other data 1368 // set up appropriately until the end of minit, which will 1369 // unblock the signals. This is the same dance as when 1370 // starting a new m to run Go code via newosproc. 1371 msigsave(mp) 1372 sigblock() 1373 1374 // Install g (= m->g0) and set the stack bounds 1375 // to match the current stack. We don't actually know 1376 // how big the stack is, like we don't know how big any 1377 // scheduling stack is, but we assume there's at least 32 kB, 1378 // which is more than enough for us. 1379 setg(mp.g0) 1380 _g_ := getg() 1381 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 1382 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 1383 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1384 1385 // Initialize this thread to use the m. 1386 asminit() 1387 minit() 1388 } 1389 1390 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1391 1392 // newextram allocates an m and puts it on the extra list. 
1393 // It is called with a working local m, so that it can do things 1394 // like call schedlock and allocate. 1395 func newextram() { 1396 // Create extra goroutine locked to extra m. 1397 // The goroutine is the context in which the cgo callback will run. 1398 // The sched.pc will never be returned to, but setting it to 1399 // goexit makes clear to the traceback routines where 1400 // the goroutine stack ends. 1401 mp := allocm(nil, nil) 1402 gp := malg(4096) 1403 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1404 gp.sched.sp = gp.stack.hi 1405 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1406 gp.sched.lr = 0 1407 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1408 gp.syscallpc = gp.sched.pc 1409 gp.syscallsp = gp.sched.sp 1410 gp.stktopsp = gp.sched.sp 1411 gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary 1412 gp.gcRescan = -1 1413 // malg returns status as Gidle, change to Gsyscall before adding to allg 1414 // where GC will see it. 1415 casgstatus(gp, _Gidle, _Gsyscall) 1416 gp.m = mp 1417 mp.curg = gp 1418 mp.locked = _LockInternal 1419 mp.lockedg = gp 1420 gp.lockedm = mp 1421 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1422 if raceenabled { 1423 gp.racectx = racegostart(funcPC(newextram)) 1424 } 1425 // put on allg for garbage collector 1426 allgadd(gp) 1427 1428 // Add m to the extra list. 1429 mnext := lockextra(true) 1430 mp.schedlink.set(mnext) 1431 unlockextra(mp) 1432 } 1433 1434 // dropm is called when a cgo callback has called needm but is now 1435 // done with the callback and returning back into the non-Go thread. 1436 // It puts the current m back onto the extra list. 1437 // 1438 // The main expense here is the call to signalstack to release the 1439 // m's signal stack, and then the call to needm on the next callback 1440 // from this thread. It is tempting to try to save the m for next time, 1441 // which would eliminate both these costs, but there might not be 1442 // a next time: the current thread (which Go does not control) might exit. 1443 // If we saved the m for that thread, there would be an m leak each time 1444 // such a thread exited. Instead, we acquire and release an m on each 1445 // call. These should typically not be scheduling operations, just a few 1446 // atomics, so the cost should be small. 1447 // 1448 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1449 // variable using pthread_key_create. Unlike the pthread keys we already use 1450 // on OS X, this dummy key would never be read by Go code. It would exist 1451 // only so that we could register at thread-exit-time destructor. 1452 // That destructor would put the m back onto the extra list. 1453 // This is purely a performance optimization. The current version, 1454 // in which dropm happens on each cgo call, is still correct too. 1455 // We may have to keep the current version on systems with cgo 1456 // but without pthreads, like Windows. 1457 func dropm() { 1458 // Clear m and g, and return m to the extra list. 1459 // After the call to setg we can only call nosplit functions 1460 // with no pointer manipulation. 1461 mp := getg().m 1462 1463 // Block signals before unminit. 1464 // Unminit unregisters the signal handling stack (but needs g on some systems). 1465 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1466 // It's important not to try to handle a signal between those two steps. 
1467 sigmask := mp.sigmask 1468 sigblock() 1469 unminit() 1470 1471 mnext := lockextra(true) 1472 mp.schedlink.set(mnext) 1473 1474 setg(nil) 1475 1476 // Commit the release of mp. 1477 unlockextra(mp) 1478 1479 msigrestore(sigmask) 1480 } 1481 1482 // A helper function for EnsureDropM. 1483 func getm() uintptr { 1484 return uintptr(unsafe.Pointer(getg().m)) 1485 } 1486 1487 var extram uintptr 1488 1489 // lockextra locks the extra list and returns the list head. 1490 // The caller must unlock the list by storing a new list head 1491 // to extram. If nilokay is true, then lockextra will 1492 // return a nil list head if that's what it finds. If nilokay is false, 1493 // lockextra will keep waiting until the list head is no longer nil. 1494 //go:nosplit 1495 func lockextra(nilokay bool) *m { 1496 const locked = 1 1497 1498 for { 1499 old := atomic.Loaduintptr(&extram) 1500 if old == locked { 1501 yield := osyield 1502 yield() 1503 continue 1504 } 1505 if old == 0 && !nilokay { 1506 usleep(1) 1507 continue 1508 } 1509 if atomic.Casuintptr(&extram, old, locked) { 1510 return (*m)(unsafe.Pointer(old)) 1511 } 1512 yield := osyield 1513 yield() 1514 continue 1515 } 1516 } 1517 1518 //go:nosplit 1519 func unlockextra(mp *m) { 1520 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1521 } 1522 1523 // Create a new m. It will start off with a call to fn, or else the scheduler. 1524 // fn needs to be static and not a heap allocated closure. 1525 // May run with m.p==nil, so write barriers are not allowed. 1526 //go:nowritebarrier 1527 func newm(fn func(), _p_ *p) { 1528 mp := allocm(_p_, fn) 1529 mp.nextp.set(_p_) 1530 mp.sigmask = initSigmask 1531 if iscgo { 1532 var ts cgothreadstart 1533 if _cgo_thread_start == nil { 1534 throw("_cgo_thread_start missing") 1535 } 1536 ts.g.set(mp.g0) 1537 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1538 ts.fn = unsafe.Pointer(funcPC(mstart)) 1539 if msanenabled { 1540 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1541 } 1542 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1543 return 1544 } 1545 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi)) 1546 } 1547 1548 // Stops execution of the current m until new work is available. 1549 // Returns with acquired P. 1550 func stopm() { 1551 _g_ := getg() 1552 1553 if _g_.m.locks != 0 { 1554 throw("stopm holding locks") 1555 } 1556 if _g_.m.p != 0 { 1557 throw("stopm holding p") 1558 } 1559 if _g_.m.spinning { 1560 throw("stopm spinning") 1561 } 1562 1563 retry: 1564 lock(&sched.lock) 1565 mput(_g_.m) 1566 unlock(&sched.lock) 1567 notesleep(&_g_.m.park) 1568 noteclear(&_g_.m.park) 1569 if _g_.m.helpgc != 0 { 1570 gchelper() 1571 _g_.m.helpgc = 0 1572 _g_.m.mcache = nil 1573 _g_.m.p = 0 1574 goto retry 1575 } 1576 acquirep(_g_.m.nextp.ptr()) 1577 _g_.m.nextp = 0 1578 } 1579 1580 func mspinning() { 1581 // startm's caller incremented nmspinning. Set the new M's spinning. 1582 getg().m.spinning = true 1583 } 1584 1585 // Schedules some M to run the p (creates an M if necessary). 1586 // If p==nil, tries to get an idle P, if no idle P's does nothing. 1587 // May run with m.p==nil, so write barriers are not allowed. 1588 // If spinning is set, the caller has incremented nmspinning and startm will 1589 // either decrement nmspinning or set m.spinning in the newly started M. 
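// lockextra/unlockextra above implement a spin lock usable without an m: the list
// head pointer itself is the lock word, and the value 1 ("locked") is swapped in
// while the list is held. A sketch of the same pointer-as-lock technique
// (hypothetical names; the runtime stores an *m here, and it uses osyield rather
// than Gosched):
//
//    const locked uintptr = 1
//
//    var listHead uintptr // 0 = empty, 1 = locked, otherwise a real pointer
//
//    func lockList() uintptr {
//        for {
//            old := atomic.LoadUintptr(&listHead)
//            if old == locked {
//                runtime.Gosched()
//                continue
//            }
//            if atomic.CompareAndSwapUintptr(&listHead, old, locked) {
//                return old // caller now owns the list rooted at old
//            }
//        }
//    }
//
//    func unlockList(head uintptr) {
//        atomic.StoreUintptr(&listHead, head) // publishing a valid head releases the lock
//    }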
1590 //go:nowritebarrier 1591 func startm(_p_ *p, spinning bool) { 1592 lock(&sched.lock) 1593 if _p_ == nil { 1594 _p_ = pidleget() 1595 if _p_ == nil { 1596 unlock(&sched.lock) 1597 if spinning { 1598 // The caller incremented nmspinning, but there are no idle Ps, 1599 // so it's okay to just undo the increment and give up. 1600 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1601 throw("startm: negative nmspinning") 1602 } 1603 } 1604 return 1605 } 1606 } 1607 mp := mget() 1608 unlock(&sched.lock) 1609 if mp == nil { 1610 var fn func() 1611 if spinning { 1612 // The caller incremented nmspinning, so set m.spinning in the new M. 1613 fn = mspinning 1614 } 1615 newm(fn, _p_) 1616 return 1617 } 1618 if mp.spinning { 1619 throw("startm: m is spinning") 1620 } 1621 if mp.nextp != 0 { 1622 throw("startm: m has p") 1623 } 1624 if spinning && !runqempty(_p_) { 1625 throw("startm: p has runnable gs") 1626 } 1627 // The caller incremented nmspinning, so set m.spinning in the new M. 1628 mp.spinning = spinning 1629 mp.nextp.set(_p_) 1630 notewakeup(&mp.park) 1631 } 1632 1633 // Hands off P from syscall or locked M. 1634 // Always runs without a P, so write barriers are not allowed. 1635 //go:nowritebarrier 1636 func handoffp(_p_ *p) { 1637 // handoffp must start an M in any situation where 1638 // findrunnable would return a G to run on _p_. 1639 1640 // if it has local work, start it straight away 1641 if !runqempty(_p_) || sched.runqsize != 0 { 1642 startm(_p_, false) 1643 return 1644 } 1645 // if it has GC work, start it straight away 1646 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 1647 startm(_p_, false) 1648 return 1649 } 1650 // no local work, check that there are no spinning/idle M's, 1651 // otherwise our help is not required 1652 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 1653 startm(_p_, true) 1654 return 1655 } 1656 lock(&sched.lock) 1657 if sched.gcwaiting != 0 { 1658 _p_.status = _Pgcstop 1659 sched.stopwait-- 1660 if sched.stopwait == 0 { 1661 notewakeup(&sched.stopnote) 1662 } 1663 unlock(&sched.lock) 1664 return 1665 } 1666 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 1667 sched.safePointFn(_p_) 1668 sched.safePointWait-- 1669 if sched.safePointWait == 0 { 1670 notewakeup(&sched.safePointNote) 1671 } 1672 } 1673 if sched.runqsize != 0 { 1674 unlock(&sched.lock) 1675 startm(_p_, false) 1676 return 1677 } 1678 // If this is the last running P and nobody is polling network, 1679 // need to wakeup another M to poll network. 1680 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 1681 unlock(&sched.lock) 1682 startm(_p_, false) 1683 return 1684 } 1685 pidleput(_p_) 1686 unlock(&sched.lock) 1687 } 1688 1689 // Tries to add one more P to execute G's. 1690 // Called when a G is made runnable (newproc, ready). 1691 func wakep() { 1692 // be conservative about spinning threads 1693 if !atomic.Cas(&sched.nmspinning, 0, 1) { 1694 return 1695 } 1696 startm(nil, true) 1697 } 1698 1699 // Stops execution of the current m that is locked to a g until the g is runnable again. 1700 // Returns with acquired P. 1701 func stoplockedm() { 1702 _g_ := getg() 1703 1704 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m { 1705 throw("stoplockedm: inconsistent locking") 1706 } 1707 if _g_.m.p != 0 { 1708 // Schedule another M to run this p. 
1709 _p_ := releasep() 1710 handoffp(_p_) 1711 } 1712 incidlelocked(1) 1713 // Wait until another thread schedules lockedg again. 1714 notesleep(&_g_.m.park) 1715 noteclear(&_g_.m.park) 1716 status := readgstatus(_g_.m.lockedg) 1717 if status&^_Gscan != _Grunnable { 1718 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 1719 dumpgstatus(_g_) 1720 throw("stoplockedm: not runnable") 1721 } 1722 acquirep(_g_.m.nextp.ptr()) 1723 _g_.m.nextp = 0 1724 } 1725 1726 // Schedules the locked m to run the locked gp. 1727 // May run during STW, so write barriers are not allowed. 1728 //go:nowritebarrier 1729 func startlockedm(gp *g) { 1730 _g_ := getg() 1731 1732 mp := gp.lockedm 1733 if mp == _g_.m { 1734 throw("startlockedm: locked to me") 1735 } 1736 if mp.nextp != 0 { 1737 throw("startlockedm: m has p") 1738 } 1739 // directly handoff current P to the locked m 1740 incidlelocked(-1) 1741 _p_ := releasep() 1742 mp.nextp.set(_p_) 1743 notewakeup(&mp.park) 1744 stopm() 1745 } 1746 1747 // Stops the current m for stopTheWorld. 1748 // Returns when the world is restarted. 1749 func gcstopm() { 1750 _g_ := getg() 1751 1752 if sched.gcwaiting == 0 { 1753 throw("gcstopm: not waiting for gc") 1754 } 1755 if _g_.m.spinning { 1756 _g_.m.spinning = false 1757 // OK to just drop nmspinning here, 1758 // startTheWorld will unpark threads as necessary. 1759 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1760 throw("gcstopm: negative nmspinning") 1761 } 1762 } 1763 _p_ := releasep() 1764 lock(&sched.lock) 1765 _p_.status = _Pgcstop 1766 sched.stopwait-- 1767 if sched.stopwait == 0 { 1768 notewakeup(&sched.stopnote) 1769 } 1770 unlock(&sched.lock) 1771 stopm() 1772 } 1773 1774 // Schedules gp to run on the current M. 1775 // If inheritTime is true, gp inherits the remaining time in the 1776 // current time slice. Otherwise, it starts a new time slice. 1777 // Never returns. 1778 func execute(gp *g, inheritTime bool) { 1779 _g_ := getg() 1780 1781 casgstatus(gp, _Grunnable, _Grunning) 1782 gp.waitsince = 0 1783 gp.preempt = false 1784 gp.stackguard0 = gp.stack.lo + _StackGuard 1785 if !inheritTime { 1786 _g_.m.p.ptr().schedtick++ 1787 } 1788 _g_.m.curg = gp 1789 gp.m = _g_.m 1790 1791 // Check whether the profiler needs to be turned on or off. 1792 hz := sched.profilehz 1793 if _g_.m.profilehz != hz { 1794 resetcpuprofiler(hz) 1795 } 1796 1797 if trace.enabled { 1798 // GoSysExit has to happen when we have a P, but before GoStart. 1799 // So we emit it here. 1800 if gp.syscallsp != 0 && gp.sysblocktraced { 1801 traceGoSysExit(gp.sysexitticks) 1802 } 1803 traceGoStart() 1804 } 1805 1806 gogo(&gp.sched) 1807 } 1808 1809 // Finds a runnable goroutine to execute. 1810 // Tries to steal from other P's, get g from global queue, poll network. 1811 func findrunnable() (gp *g, inheritTime bool) { 1812 _g_ := getg() 1813 1814 // The conditions here and in handoffp must agree: if 1815 // findrunnable would return a G to run, handoffp must start 1816 // an M. 
1817 1818 top: 1819 _p_ := _g_.m.p.ptr() 1820 if sched.gcwaiting != 0 { 1821 gcstopm() 1822 goto top 1823 } 1824 if _p_.runSafePointFn != 0 { 1825 runSafePointFn() 1826 } 1827 if fingwait && fingwake { 1828 if gp := wakefing(); gp != nil { 1829 ready(gp, 0, true) 1830 } 1831 } 1832 1833 // local runq 1834 if gp, inheritTime := runqget(_p_); gp != nil { 1835 return gp, inheritTime 1836 } 1837 1838 // global runq 1839 if sched.runqsize != 0 { 1840 lock(&sched.lock) 1841 gp := globrunqget(_p_, 0) 1842 unlock(&sched.lock) 1843 if gp != nil { 1844 return gp, false 1845 } 1846 } 1847 1848 // Poll network. 1849 // This netpoll is only an optimization before we resort to stealing. 1850 // We can safely skip it if there is a thread blocked in netpoll already. 1851 // If there is any kind of logical race with that blocked thread 1852 // (e.g. it has already returned from netpoll, but does not set lastpoll yet), 1853 // this thread will do blocking netpoll below anyway. 1854 if netpollinited() && sched.lastpoll != 0 { 1855 if gp := netpoll(false); gp != nil { // non-blocking 1856 // netpoll returns list of goroutines linked by schedlink. 1857 injectglist(gp.schedlink.ptr()) 1858 casgstatus(gp, _Gwaiting, _Grunnable) 1859 if trace.enabled { 1860 traceGoUnpark(gp, 0) 1861 } 1862 return gp, false 1863 } 1864 } 1865 1866 // Steal work from other P's. 1867 procs := uint32(gomaxprocs) 1868 if atomic.Load(&sched.npidle) == procs-1 { 1869 // Either GOMAXPROCS=1 or everybody, except for us, is idle already. 1870 // New work can appear from returning syscall/cgocall, network or timers. 1871 // None of that submits to local run queues, so there is no point in stealing. 1872 goto stop 1873 } 1874 // If number of spinning M's >= number of busy P's, block. 1875 // This is necessary to prevent excessive CPU consumption 1876 // when GOMAXPROCS>>1 but the program parallelism is low. 1877 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { // TODO: fast atomic 1878 goto stop 1879 } 1880 if !_g_.m.spinning { 1881 _g_.m.spinning = true 1882 atomic.Xadd(&sched.nmspinning, 1) 1883 } 1884 for i := 0; i < 4; i++ { 1885 for enum := stealOrder.start(fastrand1()); !enum.done(); enum.next() { 1886 if sched.gcwaiting != 0 { 1887 goto top 1888 } 1889 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 1890 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil { 1891 return gp, false 1892 } 1893 } 1894 } 1895 1896 stop: 1897 1898 // We have nothing to do. If we're in the GC mark phase, can 1899 // safely scan and blacken objects, and have work to do, run 1900 // idle-time marking rather than give up the P. 1901 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 1902 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 1903 gp := _p_.gcBgMarkWorker.ptr() 1904 casgstatus(gp, _Gwaiting, _Grunnable) 1905 if trace.enabled { 1906 traceGoUnpark(gp, 0) 1907 } 1908 return gp, false 1909 } 1910 1911 // return P and block 1912 lock(&sched.lock) 1913 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 1914 unlock(&sched.lock) 1915 goto top 1916 } 1917 if sched.runqsize != 0 { 1918 gp := globrunqget(_p_, 0) 1919 unlock(&sched.lock) 1920 return gp, false 1921 } 1922 if releasep() != _p_ { 1923 throw("findrunnable: wrong p") 1924 } 1925 pidleput(_p_) 1926 unlock(&sched.lock) 1927 1928 // Delicate dance: thread transitions from spinning to non-spinning state, 1929 // potentially concurrently with submission of new goroutines.
We must 1930 // drop nmspinning first and then check all per-P queues again (with 1931 // #StoreLoad memory barrier in between). If we do it the other way around, 1932 // another thread can submit a goroutine after we've checked all run queues 1933 // but before we drop nmspinning; as the result nobody will unpark a thread 1934 // to run the goroutine. 1935 // If we discover new work below, we need to restore m.spinning as a signal 1936 // for resetspinning to unpark a new worker thread (because there can be more 1937 // than one starving goroutine). However, if after discovering new work 1938 // we also observe no idle Ps, it is OK to just park the current thread: 1939 // the system is fully loaded so no spinning threads are required. 1940 // Also see "Worker thread parking/unparking" comment at the top of the file. 1941 wasSpinning := _g_.m.spinning 1942 if _g_.m.spinning { 1943 _g_.m.spinning = false 1944 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1945 throw("findrunnable: negative nmspinning") 1946 } 1947 } 1948 1949 // check all runqueues once again 1950 for i := 0; i < int(gomaxprocs); i++ { 1951 _p_ := allp[i] 1952 if _p_ != nil && !runqempty(_p_) { 1953 lock(&sched.lock) 1954 _p_ = pidleget() 1955 unlock(&sched.lock) 1956 if _p_ != nil { 1957 acquirep(_p_) 1958 if wasSpinning { 1959 _g_.m.spinning = true 1960 atomic.Xadd(&sched.nmspinning, 1) 1961 } 1962 goto top 1963 } 1964 break 1965 } 1966 } 1967 1968 // poll network 1969 if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 1970 if _g_.m.p != 0 { 1971 throw("findrunnable: netpoll with p") 1972 } 1973 if _g_.m.spinning { 1974 throw("findrunnable: netpoll with spinning") 1975 } 1976 gp := netpoll(true) // block until new work is available 1977 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 1978 if gp != nil { 1979 lock(&sched.lock) 1980 _p_ = pidleget() 1981 unlock(&sched.lock) 1982 if _p_ != nil { 1983 acquirep(_p_) 1984 injectglist(gp.schedlink.ptr()) 1985 casgstatus(gp, _Gwaiting, _Grunnable) 1986 if trace.enabled { 1987 traceGoUnpark(gp, 0) 1988 } 1989 return gp, false 1990 } 1991 injectglist(gp) 1992 } 1993 } 1994 stopm() 1995 goto top 1996 } 1997 1998 func resetspinning() { 1999 _g_ := getg() 2000 if !_g_.m.spinning { 2001 throw("resetspinning: not a spinning m") 2002 } 2003 _g_.m.spinning = false 2004 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2005 if int32(nmspinning) < 0 { 2006 throw("findrunnable: negative nmspinning") 2007 } 2008 // M wakeup policy is deliberately somewhat conservative, so check if we 2009 // need to wakeup another P here. See "Worker thread parking/unparking" 2010 // comment at the top of the file for details. 2011 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2012 wakep() 2013 } 2014 } 2015 2016 // Injects the list of runnable G's into the scheduler. 2017 // Can run concurrently with GC. 2018 func injectglist(glist *g) { 2019 if glist == nil { 2020 return 2021 } 2022 if trace.enabled { 2023 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2024 traceGoUnpark(gp, 0) 2025 } 2026 } 2027 lock(&sched.lock) 2028 var n int 2029 for n = 0; glist != nil; n++ { 2030 gp := glist 2031 glist = gp.schedlink.ptr() 2032 casgstatus(gp, _Gwaiting, _Grunnable) 2033 globrunqput(gp) 2034 } 2035 unlock(&sched.lock) 2036 for ; n != 0 && sched.npidle != 0; n-- { 2037 startm(nil, false) 2038 } 2039 } 2040 2041 // One round of scheduler: find a runnable goroutine and execute it. 2042 // Never returns. 
2043 func schedule() { 2044 _g_ := getg() 2045 2046 if _g_.m.locks != 0 { 2047 throw("schedule: holding locks") 2048 } 2049 2050 if _g_.m.lockedg != nil { 2051 stoplockedm() 2052 execute(_g_.m.lockedg, false) // Never returns. 2053 } 2054 2055 top: 2056 if sched.gcwaiting != 0 { 2057 gcstopm() 2058 goto top 2059 } 2060 if _g_.m.p.ptr().runSafePointFn != 0 { 2061 runSafePointFn() 2062 } 2063 2064 var gp *g 2065 var inheritTime bool 2066 if trace.enabled || trace.shutdown { 2067 gp = traceReader() 2068 if gp != nil { 2069 casgstatus(gp, _Gwaiting, _Grunnable) 2070 traceGoUnpark(gp, 0) 2071 } 2072 } 2073 if gp == nil && gcBlackenEnabled != 0 { 2074 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2075 } 2076 if gp == nil { 2077 // Check the global runnable queue once in a while to ensure fairness. 2078 // Otherwise two goroutines can completely occupy the local runqueue 2079 // by constantly respawning each other. 2080 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2081 lock(&sched.lock) 2082 gp = globrunqget(_g_.m.p.ptr(), 1) 2083 unlock(&sched.lock) 2084 } 2085 } 2086 if gp == nil { 2087 gp, inheritTime = runqget(_g_.m.p.ptr()) 2088 if gp != nil && _g_.m.spinning { 2089 throw("schedule: spinning with local work") 2090 } 2091 } 2092 if gp == nil { 2093 gp, inheritTime = findrunnable() // blocks until work is available 2094 } 2095 2096 // This thread is going to run a goroutine and is not spinning anymore, 2097 // so if it was marked as spinning we need to reset it now and potentially 2098 // start a new spinning M. 2099 if _g_.m.spinning { 2100 resetspinning() 2101 } 2102 2103 if gp.lockedm != nil { 2104 // Hands off own p to the locked m, 2105 // then blocks waiting for a new p. 2106 startlockedm(gp) 2107 goto top 2108 } 2109 2110 execute(gp, inheritTime) 2111 } 2112 2113 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2114 // Typically a caller sets gp's status away from Grunning and then 2115 // immediately calls dropg to finish the job. The caller is also responsible 2116 // for arranging that gp will be restarted using ready at an 2117 // appropriate time. After calling dropg and arranging for gp to be 2118 // readied later, the caller can do other work but eventually should 2119 // call schedule to restart the scheduling of goroutines on this m. 2120 func dropg() { 2121 _g_ := getg() 2122 2123 _g_.m.curg.m = nil 2124 _g_.m.curg = nil 2125 } 2126 2127 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2128 unlock((*mutex)(lock)) 2129 return true 2130 } 2131 2132 // park continuation on g0. 2133 func park_m(gp *g) { 2134 _g_ := getg() 2135 2136 if trace.enabled { 2137 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp) 2138 } 2139 2140 casgstatus(gp, _Grunning, _Gwaiting) 2141 dropg() 2142 2143 if _g_.m.waitunlockf != nil { 2144 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2145 ok := fn(gp, _g_.m.waitlock) 2146 _g_.m.waitunlockf = nil 2147 _g_.m.waitlock = nil 2148 if !ok { 2149 if trace.enabled { 2150 traceGoUnpark(gp, 2) 2151 } 2152 casgstatus(gp, _Gwaiting, _Grunnable) 2153 execute(gp, true) // Schedule it back, never returns. 
2154 } 2155 } 2156 schedule() 2157 } 2158 2159 func goschedImpl(gp *g) { 2160 status := readgstatus(gp) 2161 if status&^_Gscan != _Grunning { 2162 dumpgstatus(gp) 2163 throw("bad g status") 2164 } 2165 casgstatus(gp, _Grunning, _Grunnable) 2166 dropg() 2167 lock(&sched.lock) 2168 globrunqput(gp) 2169 unlock(&sched.lock) 2170 2171 schedule() 2172 } 2173 2174 // Gosched continuation on g0. 2175 func gosched_m(gp *g) { 2176 if trace.enabled { 2177 traceGoSched() 2178 } 2179 goschedImpl(gp) 2180 } 2181 2182 func gopreempt_m(gp *g) { 2183 if trace.enabled { 2184 traceGoPreempt() 2185 } 2186 goschedImpl(gp) 2187 } 2188 2189 // Finishes execution of the current goroutine. 2190 func goexit1() { 2191 if raceenabled { 2192 racegoend() 2193 } 2194 if trace.enabled { 2195 traceGoEnd() 2196 } 2197 mcall(goexit0) 2198 } 2199 2200 // goexit continuation on g0. 2201 func goexit0(gp *g) { 2202 _g_ := getg() 2203 2204 casgstatus(gp, _Grunning, _Gdead) 2205 if isSystemGoroutine(gp) { 2206 atomic.Xadd(&sched.ngsys, -1) 2207 } 2208 gp.m = nil 2209 gp.lockedm = nil 2210 _g_.m.lockedg = nil 2211 gp.paniconfault = false 2212 gp._defer = nil // should be true already but just in case. 2213 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2214 gp.writebuf = nil 2215 gp.waitreason = "" 2216 gp.param = nil 2217 2218 // Note that gp's stack scan is now "valid" because it has no 2219 // stack. We could dequeueRescan, but that takes a lock and 2220 // isn't really necessary. 2221 gp.gcscanvalid = true 2222 dropg() 2223 2224 if _g_.m.locked&^_LockExternal != 0 { 2225 print("invalid m->locked = ", _g_.m.locked, "\n") 2226 throw("internal lockOSThread error") 2227 } 2228 _g_.m.locked = 0 2229 gfput(_g_.m.p.ptr(), gp) 2230 schedule() 2231 } 2232 2233 //go:nosplit 2234 //go:nowritebarrier 2235 func save(pc, sp uintptr) { 2236 _g_ := getg() 2237 2238 _g_.sched.pc = pc 2239 _g_.sched.sp = sp 2240 _g_.sched.lr = 0 2241 _g_.sched.ret = 0 2242 _g_.sched.ctxt = nil 2243 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2244 } 2245 2246 // The goroutine g is about to enter a system call. 2247 // Record that it's not using the cpu anymore. 2248 // This is called only from the go syscall library and cgocall, 2249 // not from the low-level system calls used by the runtime. 2250 // 2251 // Entersyscall cannot split the stack: the gosave must 2252 // make g->sched refer to the caller's stack segment, because 2253 // entersyscall is going to return immediately after. 2254 // 2255 // Nothing entersyscall calls can split the stack either. 2256 // We cannot safely move the stack during an active call to syscall, 2257 // because we do not know which of the uintptr arguments are 2258 // really pointers (back into the stack). 2259 // In practice, this means that we make the fast path run through 2260 // entersyscall doing no-split things, and the slow path has to use systemstack 2261 // to run bigger things on the system stack. 2262 // 2263 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2264 // saved SP and PC are restored. This is needed when exitsyscall will be called 2265 // from a function further up in the call stack than the parent, as g->syscallsp 2266 // must always point to a valid stack frame. entersyscall below is the normal 2267 // entry point for syscalls, which obtains the SP and PC from the caller. 2268 // 2269 // Syscall tracing: 2270 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 
2271 // If the syscall does not block, that is it, we do not emit any other events. 2272 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2273 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2274 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2275 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2276 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2277 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2278 // and we wait for the increment before emitting traceGoSysExit. 2279 // Note that the increment is done even if tracing is not enabled, 2280 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2281 // 2282 //go:nosplit 2283 func reentersyscall(pc, sp uintptr) { 2284 _g_ := getg() 2285 2286 // Disable preemption because during this function g is in Gsyscall status, 2287 // but can have inconsistent g->sched, do not let GC observe it. 2288 _g_.m.locks++ 2289 2290 // Entersyscall must not call any function that might split/grow the stack. 2291 // (See details in comment above.) 2292 // Catch calls that might, by replacing the stack guard with something that 2293 // will trip any stack check and leaving a flag to tell newstack to die. 2294 _g_.stackguard0 = stackPreempt 2295 _g_.throwsplit = true 2296 2297 // Leave SP around for GC and traceback. 2298 save(pc, sp) 2299 _g_.syscallsp = sp 2300 _g_.syscallpc = pc 2301 casgstatus(_g_, _Grunning, _Gsyscall) 2302 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2303 systemstack(func() { 2304 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2305 throw("entersyscall") 2306 }) 2307 } 2308 2309 if trace.enabled { 2310 systemstack(traceGoSysCall) 2311 // systemstack itself clobbers g.sched.{pc,sp} and we might 2312 // need them later when the G is genuinely blocked in a 2313 // syscall 2314 save(pc, sp) 2315 } 2316 2317 if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic 2318 systemstack(entersyscall_sysmon) 2319 save(pc, sp) 2320 } 2321 2322 if _g_.m.p.ptr().runSafePointFn != 0 { 2323 // runSafePointFn may stack split if run on this stack 2324 systemstack(runSafePointFn) 2325 save(pc, sp) 2326 } 2327 2328 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2329 _g_.sysblocktraced = true 2330 _g_.m.mcache = nil 2331 _g_.m.p.ptr().m = 0 2332 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2333 if sched.gcwaiting != 0 { 2334 systemstack(entersyscall_gcwait) 2335 save(pc, sp) 2336 } 2337 2338 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2339 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2340 // Morestack detects this case and throws. 2341 _g_.stackguard0 = stackPreempt 2342 _g_.m.locks-- 2343 } 2344 2345 // Standard syscall entry used by the go syscall library and normal cgo calls. 
2346 //go:nosplit 2347 func entersyscall(dummy int32) { 2348 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2349 } 2350 2351 func entersyscall_sysmon() { 2352 lock(&sched.lock) 2353 if atomic.Load(&sched.sysmonwait) != 0 { 2354 atomic.Store(&sched.sysmonwait, 0) 2355 notewakeup(&sched.sysmonnote) 2356 } 2357 unlock(&sched.lock) 2358 } 2359 2360 func entersyscall_gcwait() { 2361 _g_ := getg() 2362 _p_ := _g_.m.p.ptr() 2363 2364 lock(&sched.lock) 2365 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2366 if trace.enabled { 2367 traceGoSysBlock(_p_) 2368 traceProcStop(_p_) 2369 } 2370 _p_.syscalltick++ 2371 if sched.stopwait--; sched.stopwait == 0 { 2372 notewakeup(&sched.stopnote) 2373 } 2374 } 2375 unlock(&sched.lock) 2376 } 2377 2378 // The same as entersyscall(), but with a hint that the syscall is blocking. 2379 //go:nosplit 2380 func entersyscallblock(dummy int32) { 2381 _g_ := getg() 2382 2383 _g_.m.locks++ // see comment in entersyscall 2384 _g_.throwsplit = true 2385 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2386 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2387 _g_.sysblocktraced = true 2388 _g_.m.p.ptr().syscalltick++ 2389 2390 // Leave SP around for GC and traceback. 2391 pc := getcallerpc(unsafe.Pointer(&dummy)) 2392 sp := getcallersp(unsafe.Pointer(&dummy)) 2393 save(pc, sp) 2394 _g_.syscallsp = _g_.sched.sp 2395 _g_.syscallpc = _g_.sched.pc 2396 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2397 sp1 := sp 2398 sp2 := _g_.sched.sp 2399 sp3 := _g_.syscallsp 2400 systemstack(func() { 2401 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2402 throw("entersyscallblock") 2403 }) 2404 } 2405 casgstatus(_g_, _Grunning, _Gsyscall) 2406 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2407 systemstack(func() { 2408 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2409 throw("entersyscallblock") 2410 }) 2411 } 2412 2413 systemstack(entersyscallblock_handoff) 2414 2415 // Resave for traceback during blocked call. 2416 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2417 2418 _g_.m.locks-- 2419 } 2420 2421 func entersyscallblock_handoff() { 2422 if trace.enabled { 2423 traceGoSysCall() 2424 traceGoSysBlock(getg().m.p.ptr()) 2425 } 2426 handoffp(releasep()) 2427 } 2428 2429 // The goroutine g exited its system call. 2430 // Arrange for it to run on a cpu again. 2431 // This is called only from the go syscall library, not 2432 // from the low-level system calls used by the runtime. 2433 //go:nosplit 2434 func exitsyscall(dummy int32) { 2435 _g_ := getg() 2436 2437 _g_.m.locks++ // see comment in entersyscall 2438 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2439 // throw calls print which may try to grow the stack, 2440 // but throwsplit == true so the stack can not be grown; 2441 // use systemstack to avoid that possible problem. 
2442 systemstack(func() { 2443 throw("exitsyscall: syscall frame is no longer valid") 2444 }) 2445 } 2446 2447 _g_.waitsince = 0 2448 oldp := _g_.m.p.ptr() 2449 if exitsyscallfast() { 2450 if _g_.m.mcache == nil { 2451 throw("lost mcache") 2452 } 2453 if trace.enabled { 2454 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2455 systemstack(traceGoStart) 2456 } 2457 } 2458 // There's a cpu for us, so we can run. 2459 _g_.m.p.ptr().syscalltick++ 2460 // We need to cas the status and scan before resuming... 2461 casgstatus(_g_, _Gsyscall, _Grunning) 2462 2463 // Garbage collector isn't running (since we are), 2464 // so okay to clear syscallsp. 2465 _g_.syscallsp = 0 2466 _g_.m.locks-- 2467 if _g_.preempt { 2468 // restore the preemption request in case we've cleared it in newstack 2469 _g_.stackguard0 = stackPreempt 2470 } else { 2471 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2472 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2473 } 2474 _g_.throwsplit = false 2475 return 2476 } 2477 2478 _g_.sysexitticks = 0 2479 if trace.enabled { 2480 // Wait till traceGoSysBlock event is emitted. 2481 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2482 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2483 osyield() 2484 } 2485 // We can't trace syscall exit right now because we don't have a P. 2486 // Tracing code can invoke write barriers that cannot run without a P. 2487 // So instead we remember the syscall exit time and emit the event 2488 // in execute when we have a P. 2489 _g_.sysexitticks = cputicks() 2490 } 2491 2492 _g_.m.locks-- 2493 2494 // Call the scheduler. 2495 mcall(exitsyscall0) 2496 2497 if _g_.m.mcache == nil { 2498 throw("lost mcache") 2499 } 2500 2501 // Scheduler returned, so we're allowed to run now. 2502 // Delete the syscallsp information that we left for 2503 // the garbage collector during the system call. 2504 // Must wait until now because until gosched returns 2505 // we don't know for sure that the garbage collector 2506 // is not running. 2507 _g_.syscallsp = 0 2508 _g_.m.p.ptr().syscalltick++ 2509 _g_.throwsplit = false 2510 } 2511 2512 //go:nosplit 2513 func exitsyscallfast() bool { 2514 _g_ := getg() 2515 2516 // Freezetheworld sets stopwait but does not retake P's. 2517 if sched.stopwait == freezeStopWait { 2518 _g_.m.mcache = nil 2519 _g_.m.p = 0 2520 return false 2521 } 2522 2523 // Try to re-acquire the last P. 2524 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2525 // There's a cpu for us, so we can run. 2526 _g_.m.mcache = _g_.m.p.ptr().mcache 2527 _g_.m.p.ptr().m.set(_g_.m) 2528 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2529 if trace.enabled { 2530 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2531 // traceGoSysBlock for this syscall was already emitted, 2532 // but here we effectively retake the p from the new syscall running on the same p. 2533 systemstack(func() { 2534 // Denote blocking of the new syscall. 2535 traceGoSysBlock(_g_.m.p.ptr()) 2536 // Denote completion of the current syscall. 2537 traceGoSysExit(0) 2538 }) 2539 } 2540 _g_.m.p.ptr().syscalltick++ 2541 } 2542 return true 2543 } 2544 2545 // Try to get any other idle P. 
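// Drop our P and mcache references first; exitsyscallfast_pidle (below) takes sched.lock, pops an idle P with pidleget, and acquires it with acquirep if one is available.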
2546 oldp := _g_.m.p.ptr() 2547 _g_.m.mcache = nil 2548 _g_.m.p = 0 2549 if sched.pidle != 0 { 2550 var ok bool 2551 systemstack(func() { 2552 ok = exitsyscallfast_pidle() 2553 if ok && trace.enabled { 2554 if oldp != nil { 2555 // Wait till traceGoSysBlock event is emitted. 2556 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2557 for oldp.syscalltick == _g_.m.syscalltick { 2558 osyield() 2559 } 2560 } 2561 traceGoSysExit(0) 2562 } 2563 }) 2564 if ok { 2565 return true 2566 } 2567 } 2568 return false 2569 } 2570 2571 func exitsyscallfast_pidle() bool { 2572 lock(&sched.lock) 2573 _p_ := pidleget() 2574 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 2575 atomic.Store(&sched.sysmonwait, 0) 2576 notewakeup(&sched.sysmonnote) 2577 } 2578 unlock(&sched.lock) 2579 if _p_ != nil { 2580 acquirep(_p_) 2581 return true 2582 } 2583 return false 2584 } 2585 2586 // exitsyscall slow path on g0. 2587 // Failed to acquire P, enqueue gp as runnable. 2588 func exitsyscall0(gp *g) { 2589 _g_ := getg() 2590 2591 casgstatus(gp, _Gsyscall, _Grunnable) 2592 dropg() 2593 lock(&sched.lock) 2594 _p_ := pidleget() 2595 if _p_ == nil { 2596 globrunqput(gp) 2597 } else if atomic.Load(&sched.sysmonwait) != 0 { 2598 atomic.Store(&sched.sysmonwait, 0) 2599 notewakeup(&sched.sysmonnote) 2600 } 2601 unlock(&sched.lock) 2602 if _p_ != nil { 2603 acquirep(_p_) 2604 execute(gp, false) // Never returns. 2605 } 2606 if _g_.m.lockedg != nil { 2607 // Wait until another thread schedules gp and so m again. 2608 stoplockedm() 2609 execute(gp, false) // Never returns. 2610 } 2611 stopm() 2612 schedule() // Never returns. 2613 } 2614 2615 func beforefork() { 2616 gp := getg().m.curg 2617 2618 // Fork can hang if preempted with signals frequently enough (see issue 5517). 2619 // Ensure that we stay on the same M where we disable profiling. 2620 gp.m.locks++ 2621 if gp.m.profilehz != 0 { 2622 resetcpuprofiler(0) 2623 } 2624 2625 // This function is called before fork in syscall package. 2626 // Code between fork and exec must not allocate memory nor even try to grow stack. 2627 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2628 // runtime_AfterFork will undo this in parent process, but not in child. 2629 gp.stackguard0 = stackFork 2630 } 2631 2632 // Called from syscall package before fork. 2633 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2634 //go:nosplit 2635 func syscall_runtime_BeforeFork() { 2636 systemstack(beforefork) 2637 } 2638 2639 func afterfork() { 2640 gp := getg().m.curg 2641 2642 // See the comment in beforefork. 2643 gp.stackguard0 = gp.stack.lo + _StackGuard 2644 2645 hz := sched.profilehz 2646 if hz != 0 { 2647 resetcpuprofiler(hz) 2648 } 2649 gp.m.locks-- 2650 } 2651 2652 // Called from syscall package after fork in parent. 2653 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2654 //go:nosplit 2655 func syscall_runtime_AfterFork() { 2656 systemstack(afterfork) 2657 } 2658 2659 // Allocate a new g, with a stack big enough for stacksize bytes. 2660 func malg(stacksize int32) *g { 2661 newg := new(g) 2662 if stacksize >= 0 { 2663 stacksize = round2(_StackSystem + stacksize) 2664 systemstack(func() { 2665 newg.stack, newg.stkbar = stackalloc(uint32(stacksize)) 2666 }) 2667 newg.stackguard0 = newg.stack.lo + _StackGuard 2668 newg.stackguard1 = ^uintptr(0) 2669 newg.stackAlloc = uintptr(stacksize) 2670 } 2671 return newg 2672 } 2673 2674 // Create a new g running fn with siz bytes of arguments. 
2675 // Put it on the queue of g's waiting to run. 2676 // The compiler turns a go statement into a call to this. 2677 // Cannot split the stack because it assumes that the arguments 2678 // are available sequentially after &fn; they would not be 2679 // copied if a stack split occurred. 2680 //go:nosplit 2681 func newproc(siz int32, fn *funcval) { 2682 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2683 pc := getcallerpc(unsafe.Pointer(&siz)) 2684 systemstack(func() { 2685 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2686 }) 2687 } 2688 2689 // Create a new g running fn with narg bytes of arguments starting 2690 // at argp and returning nret bytes of results. callerpc is the 2691 // address of the go statement that created this. The new g is put 2692 // on the queue of g's waiting to run. 2693 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2694 _g_ := getg() 2695 2696 if fn == nil { 2697 _g_.m.throwing = -1 // do not dump full stacks 2698 throw("go of nil func value") 2699 } 2700 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2701 siz := narg + nret 2702 siz = (siz + 7) &^ 7 2703 2704 // We could allocate a larger initial stack if necessary. 2705 // Not worth it: this is almost always an error. 2706 // 4*sizeof(uintreg): extra space added below 2707 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2708 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2709 throw("newproc: function arguments too large for new goroutine") 2710 } 2711 2712 _p_ := _g_.m.p.ptr() 2713 newg := gfget(_p_) 2714 if newg == nil { 2715 newg = malg(_StackMin) 2716 casgstatus(newg, _Gidle, _Gdead) 2717 newg.gcRescan = -1 2718 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2719 } 2720 if newg.stack.hi == 0 { 2721 throw("newproc1: newg missing stack") 2722 } 2723 2724 if readgstatus(newg) != _Gdead { 2725 throw("newproc1: new g is not Gdead") 2726 } 2727 2728 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2729 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2730 sp := newg.stack.hi - totalSize 2731 spArg := sp 2732 if usesLR { 2733 // caller's LR 2734 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil 2735 prepGoExitFrame(sp) 2736 spArg += sys.MinFrameSize 2737 } 2738 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2739 2740 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2741 newg.sched.sp = sp 2742 newg.stktopsp = sp 2743 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 2744 newg.sched.g = guintptr(unsafe.Pointer(newg)) 2745 gostartcallfn(&newg.sched, fn) 2746 newg.gopc = callerpc 2747 newg.startpc = fn.fn 2748 if isSystemGoroutine(newg) { 2749 atomic.Xadd(&sched.ngsys, +1) 2750 } 2751 // The stack is dirty from the argument frame, so queue it for 2752 // scanning. Do this before setting it to runnable so we still 2753 // own the G. If we're recycling a G, it may already be on the 2754 // rescan list. 2755 if newg.gcRescan == -1 { 2756 queueRescan(newg) 2757 } else { 2758 // The recycled G is already on the rescan list. Just 2759 // mark the stack dirty. 
2760 newg.gcscanvalid = false 2761 } 2762 casgstatus(newg, _Gdead, _Grunnable) 2763 2764 if _p_.goidcache == _p_.goidcacheend { 2765 // Sched.goidgen is the last allocated id, 2766 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 2767 // At startup sched.goidgen=0, so main goroutine receives goid=1. 2768 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 2769 _p_.goidcache -= _GoidCacheBatch - 1 2770 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 2771 } 2772 newg.goid = int64(_p_.goidcache) 2773 _p_.goidcache++ 2774 if raceenabled { 2775 newg.racectx = racegostart(callerpc) 2776 } 2777 if trace.enabled { 2778 traceGoCreate(newg, newg.startpc) 2779 } 2780 runqput(_p_, newg, true) 2781 2782 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic 2783 wakep() 2784 } 2785 _g_.m.locks-- 2786 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 2787 _g_.stackguard0 = stackPreempt 2788 } 2789 return newg 2790 } 2791 2792 // Put on gfree list. 2793 // If local list is too long, transfer a batch to the global list. 2794 func gfput(_p_ *p, gp *g) { 2795 if readgstatus(gp) != _Gdead { 2796 throw("gfput: bad status (not Gdead)") 2797 } 2798 2799 stksize := gp.stackAlloc 2800 2801 if stksize != _FixedStack { 2802 // non-standard stack size - free it. 2803 stackfree(gp.stack, gp.stackAlloc) 2804 gp.stack.lo = 0 2805 gp.stack.hi = 0 2806 gp.stackguard0 = 0 2807 gp.stkbar = nil 2808 gp.stkbarPos = 0 2809 } else { 2810 // Reset stack barriers. 2811 gp.stkbar = gp.stkbar[:0] 2812 gp.stkbarPos = 0 2813 } 2814 2815 gp.schedlink.set(_p_.gfree) 2816 _p_.gfree = gp 2817 _p_.gfreecnt++ 2818 if _p_.gfreecnt >= 64 { 2819 lock(&sched.gflock) 2820 for _p_.gfreecnt >= 32 { 2821 _p_.gfreecnt-- 2822 gp = _p_.gfree 2823 _p_.gfree = gp.schedlink.ptr() 2824 if gp.stack.lo == 0 { 2825 gp.schedlink.set(sched.gfreeNoStack) 2826 sched.gfreeNoStack = gp 2827 } else { 2828 gp.schedlink.set(sched.gfreeStack) 2829 sched.gfreeStack = gp 2830 } 2831 sched.ngfree++ 2832 } 2833 unlock(&sched.gflock) 2834 } 2835 } 2836 2837 // Get from gfree list. 2838 // If local list is empty, grab a batch from global list. 2839 func gfget(_p_ *p) *g { 2840 retry: 2841 gp := _p_.gfree 2842 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 2843 lock(&sched.gflock) 2844 for _p_.gfreecnt < 32 { 2845 if sched.gfreeStack != nil { 2846 // Prefer Gs with stacks. 2847 gp = sched.gfreeStack 2848 sched.gfreeStack = gp.schedlink.ptr() 2849 } else if sched.gfreeNoStack != nil { 2850 gp = sched.gfreeNoStack 2851 sched.gfreeNoStack = gp.schedlink.ptr() 2852 } else { 2853 break 2854 } 2855 _p_.gfreecnt++ 2856 sched.ngfree-- 2857 gp.schedlink.set(_p_.gfree) 2858 _p_.gfree = gp 2859 } 2860 unlock(&sched.gflock) 2861 goto retry 2862 } 2863 if gp != nil { 2864 _p_.gfree = gp.schedlink.ptr() 2865 _p_.gfreecnt-- 2866 if gp.stack.lo == 0 { 2867 // Stack was deallocated in gfput. Allocate a new one. 2868 systemstack(func() { 2869 gp.stack, gp.stkbar = stackalloc(_FixedStack) 2870 }) 2871 gp.stackguard0 = gp.stack.lo + _StackGuard 2872 gp.stackAlloc = _FixedStack 2873 } else { 2874 if raceenabled { 2875 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2876 } 2877 if msanenabled { 2878 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2879 } 2880 } 2881 } 2882 return gp 2883 } 2884 2885 // Purge all cached G's from gfree list to the global list. 
2886 func gfpurge(_p_ *p) { 2887 lock(&sched.gflock) 2888 for _p_.gfreecnt != 0 { 2889 _p_.gfreecnt-- 2890 gp := _p_.gfree 2891 _p_.gfree = gp.schedlink.ptr() 2892 if gp.stack.lo == 0 { 2893 gp.schedlink.set(sched.gfreeNoStack) 2894 sched.gfreeNoStack = gp 2895 } else { 2896 gp.schedlink.set(sched.gfreeStack) 2897 sched.gfreeStack = gp 2898 } 2899 sched.ngfree++ 2900 } 2901 unlock(&sched.gflock) 2902 } 2903 2904 // Breakpoint executes a breakpoint trap. 2905 func Breakpoint() { 2906 breakpoint() 2907 } 2908 2909 // dolockOSThread is called by LockOSThread and lockOSThread below 2910 // after they modify m.locked. Do not allow preemption during this call, 2911 // or else the m might be different in this function than in the caller. 2912 //go:nosplit 2913 func dolockOSThread() { 2914 _g_ := getg() 2915 _g_.m.lockedg = _g_ 2916 _g_.lockedm = _g_.m 2917 } 2918 2919 //go:nosplit 2920 2921 // LockOSThread wires the calling goroutine to its current operating system thread. 2922 // Until the calling goroutine exits or calls UnlockOSThread, it will always 2923 // execute in that thread, and no other goroutine can. 2924 func LockOSThread() { 2925 getg().m.locked |= _LockExternal 2926 dolockOSThread() 2927 } 2928 2929 //go:nosplit 2930 func lockOSThread() { 2931 getg().m.locked += _LockInternal 2932 dolockOSThread() 2933 } 2934 2935 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 2936 // after they update m->locked. Do not allow preemption during this call, 2937 // or else the m might be in different in this function than in the caller. 2938 //go:nosplit 2939 func dounlockOSThread() { 2940 _g_ := getg() 2941 if _g_.m.locked != 0 { 2942 return 2943 } 2944 _g_.m.lockedg = nil 2945 _g_.lockedm = nil 2946 } 2947 2948 //go:nosplit 2949 2950 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 2951 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 2952 func UnlockOSThread() { 2953 getg().m.locked &^= _LockExternal 2954 dounlockOSThread() 2955 } 2956 2957 //go:nosplit 2958 func unlockOSThread() { 2959 _g_ := getg() 2960 if _g_.m.locked < _LockInternal { 2961 systemstack(badunlockosthread) 2962 } 2963 _g_.m.locked -= _LockInternal 2964 dounlockOSThread() 2965 } 2966 2967 func badunlockosthread() { 2968 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 2969 } 2970 2971 func gcount() int32 { 2972 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 2973 for i := 0; ; i++ { 2974 _p_ := allp[i] 2975 if _p_ == nil { 2976 break 2977 } 2978 n -= _p_.gfreecnt 2979 } 2980 2981 // All these variables can be changed concurrently, so the result can be inconsistent. 2982 // But at least the current goroutine is running. 2983 if n < 1 { 2984 n = 1 2985 } 2986 return n 2987 } 2988 2989 func mcount() int32 { 2990 return sched.mcount 2991 } 2992 2993 var prof struct { 2994 lock uint32 2995 hz int32 2996 } 2997 2998 func _System() { _System() } 2999 func _ExternalCode() { _ExternalCode() } 3000 func _GC() { _GC() } 3001 3002 // Called if we receive a SIGPROF signal. 3003 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3004 if prof.hz == 0 { 3005 return 3006 } 3007 3008 // Profiling runs concurrently with GC, so it must not allocate. 3009 mp.mallocing++ 3010 3011 // Define that a "user g" is a user-created goroutine, and a "system g" 3012 // is one that is m->g0 or m->gsignal. 3013 // 3014 // We might be interrupted for profiling halfway through a 3015 // goroutine switch. 
The switch involves updating three (or four) values: 3016 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3017 // because once it gets updated the new g is running. 3018 // 3019 // When switching from a user g to a system g, LR is not considered live, 3020 // so the update only affects g, SP, and PC. Since PC must be updated last, 3021 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3022 // (2) both g and SP are updated, and (3) SP alone is updated. 3023 // If SP or g alone is updated, we can detect the partial transition by checking 3024 // whether the SP is within g's stack bounds. (We could also require that SP 3025 // be changed only after g, but the stack bounds check is needed by other 3026 // cases, so there is no need to impose an additional requirement.) 3027 // 3028 // There is one exceptional transition to a system g, not in ordinary execution. 3029 // When a signal arrives, the operating system starts the signal handler running 3030 // with an updated PC and SP. The g is updated last, at the beginning of the 3031 // handler. There are two reasons this is okay. First, until g is updated the 3032 // g and SP do not match, so the stack bounds check detects the partial transition. 3033 // Second, signal handlers currently run with signals disabled, so a profiling 3034 // signal cannot arrive during the handler. 3035 // 3036 // When switching from a system g to a user g, there are three possibilities. 3037 // 3038 // First, it may be that the g switch has no PC update, either because the SP 3039 // corresponds to a user g throughout (as in asmcgocall) 3040 // or because it has been arranged to look like a user g frame 3041 // (as in cgocallback_gofunc). In this case, since the entire 3042 // transition is a g+SP update, a partial transition updating just one of 3043 // those will be detected by the stack bounds check. 3044 // 3045 // Second, when returning from a signal handler, the PC and SP updates 3046 // are performed by the operating system in an atomic update, so the g 3047 // update must be done before them. The stack bounds check detects 3048 // the partial transition here, and (again) signal handlers run with signals 3049 // disabled, so a profiling signal cannot arrive then anyway. 3050 // 3051 // Third, the common case: it may be that the switch updates g, SP, and PC 3052 // separately. If the PC is within any of the functions that do this, 3053 // we don't ask for a traceback. See the function setsSP for more about this. 3054 // 3055 // There is another apparently viable approach, recorded here in case 3056 // the "PC within setsSP function" check turns out not to be usable. 3057 // It would be possible to delay the update of either g or SP until immediately 3058 // before the PC update instruction. Then, because of the stack bounds check, 3059 // the only problematic interrupt point is just before that PC update instruction, 3060 // and the sigprof handler can detect that instruction and simulate stepping past 3061 // it in order to reach a consistent state. On ARM, the update of g must be made 3062 // in two places (in R10 and also in a TLS slot), so the delayed update would 3063 // need to be the SP update. The sigprof handler must read the instruction at 3064 // the current PC and, if it is one of the known instructions (for example, JMP BX or 3065 // MOV R2, PC), use that other register in place of the PC value.
3066 // The biggest drawback to this solution is that it requires that we can tell 3067 // whether it's safe to read from the memory pointed at by PC. 3068 // In a correct program, we can test PC == nil and otherwise read, 3069 // but if a profiling signal happens at the instant that a program executes 3070 // a bad jump (before the program manages to handle the resulting fault) 3071 // the profiling handler could fault trying to read nonexistent memory. 3072 // 3073 // To recap, there are no constraints on the assembly being used for the 3074 // transition. We simply require that g and SP match and that the PC is not 3075 // in gogo. 3076 traceback := true 3077 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3078 traceback = false 3079 } 3080 var stk [maxCPUProfStack]uintptr 3081 var haveStackLock *g 3082 n := 0 3083 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3084 cgoOff := 0 3085 // Check cgoCallersUse to make sure that we are not 3086 // interrupting other code that is fiddling with 3087 // cgoCallers. We are running in a signal handler 3088 // with all signals blocked, so we don't have to worry 3089 // about any other code interrupting us. 3090 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3091 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3092 cgoOff++ 3093 } 3094 copy(stk[:], mp.cgoCallers[:cgoOff]) 3095 mp.cgoCallers[0] = 0 3096 } 3097 3098 // Collect Go stack that leads to the cgo call. 3099 if gcTryLockStackBarriers(mp.curg) { 3100 haveStackLock = mp.curg 3101 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3102 } 3103 } else if traceback { 3104 var flags uint = _TraceTrap 3105 if gp.m.curg != nil && gcTryLockStackBarriers(gp.m.curg) { 3106 // It's safe to traceback the user stack. 3107 haveStackLock = gp.m.curg 3108 flags |= _TraceJumpStack 3109 } 3110 // Traceback is safe if we're on the system stack (if 3111 // necessary, flags will stop it before switching to 3112 // the user stack), or if we locked the user stack. 3113 if gp != gp.m.curg || haveStackLock != nil { 3114 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags) 3115 } 3116 } 3117 if haveStackLock != nil { 3118 gcUnlockStackBarriers(haveStackLock) 3119 } 3120 3121 if n <= 0 { 3122 // Normal traceback is impossible or has failed. 3123 // See if it falls into several common cases. 3124 n = 0 3125 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3126 // Libcall, i.e. runtime syscall on windows. 3127 // Collect Go stack that leads to the call. 3128 if gcTryLockStackBarriers(mp.libcallg.ptr()) { 3129 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3130 gcUnlockStackBarriers(mp.libcallg.ptr()) 3131 } 3132 } 3133 if n == 0 { 3134 // If all of the above has failed, account it against abstract "System" or "GC". 3135 n = 2 3136 // "ExternalCode" is better than "etext". 3137 if pc > firstmoduledata.etext { 3138 pc = funcPC(_ExternalCode) + sys.PCQuantum 3139 } 3140 stk[0] = pc 3141 if mp.preemptoff != "" || mp.helpgc != 0 { 3142 stk[1] = funcPC(_GC) + sys.PCQuantum 3143 } else { 3144 stk[1] = funcPC(_System) + sys.PCQuantum 3145 } 3146 } 3147 } 3148 3149 if prof.hz != 0 { 3150 // Simple cas-lock to coordinate with setcpuprofilerate. 
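// setcpuprofilerate_m (below) takes the same lock before changing prof.hz, so the hz check and cpuprof.add cannot interleave with a rate change.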
3151 for !atomic.Cas(&prof.lock, 0, 1) { 3152 osyield() 3153 } 3154 if prof.hz != 0 { 3155 cpuprof.add(stk[:n]) 3156 } 3157 atomic.Store(&prof.lock, 0) 3158 } 3159 mp.mallocing-- 3160 } 3161 3162 // Reports whether a function will set the SP 3163 // to an absolute value. Important that 3164 // we don't traceback when these are at the bottom 3165 // of the stack since we can't be sure that we will 3166 // find the caller. 3167 // 3168 // If the function is not on the bottom of the stack 3169 // we assume that it will have set it up so that traceback will be consistent, 3170 // either by being a traceback terminating function 3171 // or putting one on the stack at the right offset. 3172 func setsSP(pc uintptr) bool { 3173 f := findfunc(pc) 3174 if f == nil { 3175 // couldn't find the function for this PC, 3176 // so assume the worst and stop traceback 3177 return true 3178 } 3179 switch f.entry { 3180 case gogoPC, systemstackPC, mcallPC, morestackPC: 3181 return true 3182 } 3183 return false 3184 } 3185 3186 // Arrange to call fn with a traceback hz times a second. 3187 func setcpuprofilerate_m(hz int32) { 3188 // Force sane arguments. 3189 if hz < 0 { 3190 hz = 0 3191 } 3192 3193 // Disable preemption, otherwise we can be rescheduled to another thread 3194 // that has profiling enabled. 3195 _g_ := getg() 3196 _g_.m.locks++ 3197 3198 // Stop profiler on this thread so that it is safe to lock prof. 3199 // if a profiling signal came in while we had prof locked, 3200 // it would deadlock. 3201 resetcpuprofiler(0) 3202 3203 for !atomic.Cas(&prof.lock, 0, 1) { 3204 osyield() 3205 } 3206 prof.hz = hz 3207 atomic.Store(&prof.lock, 0) 3208 3209 lock(&sched.lock) 3210 sched.profilehz = hz 3211 unlock(&sched.lock) 3212 3213 if hz != 0 { 3214 resetcpuprofiler(hz) 3215 } 3216 3217 _g_.m.locks-- 3218 } 3219 3220 // Change number of processors. The world is stopped, sched is locked. 3221 // gcworkbufs are not being modified by either the GC or 3222 // the write barrier code. 3223 // Returns list of Ps with local work, they need to be scheduled by the caller. 
3224 func procresize(nprocs int32) *p { 3225 old := gomaxprocs 3226 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3227 throw("procresize: invalid arg") 3228 } 3229 if trace.enabled { 3230 traceGomaxprocs(nprocs) 3231 } 3232 3233 // update statistics 3234 now := nanotime() 3235 if sched.procresizetime != 0 { 3236 sched.totaltime += int64(old) * (now - sched.procresizetime) 3237 } 3238 sched.procresizetime = now 3239 3240 // initialize new P's 3241 for i := int32(0); i < nprocs; i++ { 3242 pp := allp[i] 3243 if pp == nil { 3244 pp = new(p) 3245 pp.id = i 3246 pp.status = _Pgcstop 3247 pp.sudogcache = pp.sudogbuf[:0] 3248 for i := range pp.deferpool { 3249 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3250 } 3251 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3252 } 3253 if pp.mcache == nil { 3254 if old == 0 && i == 0 { 3255 if getg().m.mcache == nil { 3256 throw("missing mcache?") 3257 } 3258 pp.mcache = getg().m.mcache // bootstrap 3259 } else { 3260 pp.mcache = allocmcache() 3261 } 3262 } 3263 if raceenabled && pp.racectx == 0 { 3264 if old == 0 && i == 0 { 3265 pp.racectx = raceprocctx0 3266 raceprocctx0 = 0 // bootstrap 3267 } else { 3268 pp.racectx = raceproccreate() 3269 } 3270 } 3271 } 3272 3273 // free unused P's 3274 for i := nprocs; i < old; i++ { 3275 p := allp[i] 3276 if trace.enabled { 3277 if p == getg().m.p.ptr() { 3278 // moving to p[0], pretend that we were descheduled 3279 // and then scheduled again to keep the trace sane. 3280 traceGoSched() 3281 traceProcStop(p) 3282 } 3283 } 3284 // move all runnable goroutines to the global queue 3285 for p.runqhead != p.runqtail { 3286 // pop from tail of local queue 3287 p.runqtail-- 3288 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3289 // push onto head of global queue 3290 globrunqputhead(gp) 3291 } 3292 if p.runnext != 0 { 3293 globrunqputhead(p.runnext.ptr()) 3294 p.runnext = 0 3295 } 3296 // if there's a background worker, make it runnable and put 3297 // it on the global queue so it can clean itself up 3298 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3299 casgstatus(gp, _Gwaiting, _Grunnable) 3300 if trace.enabled { 3301 traceGoUnpark(gp, 0) 3302 } 3303 globrunqput(gp) 3304 // This assignment doesn't race because the 3305 // world is stopped. 
3306 p.gcBgMarkWorker.set(nil) 3307 } 3308 for i := range p.sudogbuf { 3309 p.sudogbuf[i] = nil 3310 } 3311 p.sudogcache = p.sudogbuf[:0] 3312 for i := range p.deferpool { 3313 for j := range p.deferpoolbuf[i] { 3314 p.deferpoolbuf[i][j] = nil 3315 } 3316 p.deferpool[i] = p.deferpoolbuf[i][:0] 3317 } 3318 freemcache(p.mcache) 3319 p.mcache = nil 3320 gfpurge(p) 3321 traceProcFree(p) 3322 if raceenabled { 3323 raceprocdestroy(p.racectx) 3324 p.racectx = 0 3325 } 3326 p.status = _Pdead 3327 // can't free P itself because it can be referenced by an M in syscall 3328 } 3329 3330 _g_ := getg() 3331 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3332 // continue to use the current P 3333 _g_.m.p.ptr().status = _Prunning 3334 } else { 3335 // release the current P and acquire allp[0] 3336 if _g_.m.p != 0 { 3337 _g_.m.p.ptr().m = 0 3338 } 3339 _g_.m.p = 0 3340 _g_.m.mcache = nil 3341 p := allp[0] 3342 p.m = 0 3343 p.status = _Pidle 3344 acquirep(p) 3345 if trace.enabled { 3346 traceGoStart() 3347 } 3348 } 3349 var runnablePs *p 3350 for i := nprocs - 1; i >= 0; i-- { 3351 p := allp[i] 3352 if _g_.m.p.ptr() == p { 3353 continue 3354 } 3355 p.status = _Pidle 3356 if runqempty(p) { 3357 pidleput(p) 3358 } else { 3359 p.m.set(mget()) 3360 p.link.set(runnablePs) 3361 runnablePs = p 3362 } 3363 } 3364 stealOrder.reset(uint32(nprocs)) 3365 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3366 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3367 return runnablePs 3368 } 3369 3370 // Associate p and the current m. 3371 func acquirep(_p_ *p) { 3372 acquirep1(_p_) 3373 3374 // have p; write barriers now allowed 3375 _g_ := getg() 3376 _g_.m.mcache = _p_.mcache 3377 3378 if trace.enabled { 3379 traceProcStart() 3380 } 3381 } 3382 3383 // May run during STW, so write barriers are not allowed. 3384 //go:nowritebarrier 3385 func acquirep1(_p_ *p) { 3386 _g_ := getg() 3387 3388 if _g_.m.p != 0 || _g_.m.mcache != nil { 3389 throw("acquirep: already in go") 3390 } 3391 if _p_.m != 0 || _p_.status != _Pidle { 3392 id := int32(0) 3393 if _p_.m != 0 { 3394 id = _p_.m.ptr().id 3395 } 3396 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3397 throw("acquirep: invalid p state") 3398 } 3399 _g_.m.p.set(_p_) 3400 _p_.m.set(_g_.m) 3401 _p_.status = _Prunning 3402 } 3403 3404 // Disassociate p and the current m. 3405 func releasep() *p { 3406 _g_ := getg() 3407 3408 if _g_.m.p == 0 || _g_.m.mcache == nil { 3409 throw("releasep: invalid arg") 3410 } 3411 _p_ := _g_.m.p.ptr() 3412 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 3413 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 3414 throw("releasep: invalid p state") 3415 } 3416 if trace.enabled { 3417 traceProcStop(_g_.m.p.ptr()) 3418 } 3419 _g_.m.p = 0 3420 _g_.m.mcache = nil 3421 _p_.m = 0 3422 _p_.status = _Pidle 3423 return _p_ 3424 } 3425 3426 func incidlelocked(v int32) { 3427 lock(&sched.lock) 3428 sched.nmidlelocked += v 3429 if v > 0 { 3430 checkdead() 3431 } 3432 unlock(&sched.lock) 3433 } 3434 3435 // Check for deadlock situation. 3436 // The check is based on number of running M's, if 0 -> deadlock. 3437 func checkdead() { 3438 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 3439 // there are no running goroutines. The calling program is 3440 // assumed to be running. 
3441 if islibrary || isarchive { 3442 return 3443 } 3444 3445 // If we are dying because of a signal caught on an already idle thread, 3446 // freezetheworld will cause all running threads to block. 3447 // The runtime will then essentially be in a deadlock state, 3448 // except that there is a thread that will call exit soon. 3449 if panicking > 0 { 3450 return 3451 } 3452 3453 // -1 for sysmon 3454 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 3455 if run > 0 { 3456 return 3457 } 3458 if run < 0 { 3459 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 3460 throw("checkdead: inconsistent counts") 3461 } 3462 3463 grunning := 0 3464 lock(&allglock) 3465 for i := 0; i < len(allgs); i++ { 3466 gp := allgs[i] 3467 if isSystemGoroutine(gp) { 3468 continue 3469 } 3470 s := readgstatus(gp) 3471 switch s &^ _Gscan { 3472 case _Gwaiting: 3473 grunning++ 3474 case _Grunnable, 3475 _Grunning, 3476 _Gsyscall: 3477 unlock(&allglock) 3478 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 3479 throw("checkdead: runnable g") 3480 } 3481 } 3482 unlock(&allglock) 3483 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 3484 throw("no goroutines (main called runtime.Goexit) - deadlock!") 3485 } 3486 3487 // Maybe jump time forward for playground. 3488 gp := timejump() 3489 if gp != nil { 3490 casgstatus(gp, _Gwaiting, _Grunnable) 3491 globrunqput(gp) 3492 _p_ := pidleget() 3493 if _p_ == nil { 3494 throw("checkdead: no p for timer") 3495 } 3496 mp := mget() 3497 if mp == nil { 3498 // There should always be a free M since 3499 // nothing is running. 3500 throw("checkdead: no m for timer") 3501 } 3502 mp.nextp.set(_p_) 3503 notewakeup(&mp.park) 3504 return 3505 } 3506 3507 getg().m.throwing = -1 // do not dump full stacks 3508 throw("all goroutines are asleep - deadlock!") 3509 } 3510 3511 // forcegcperiod is the maximum time in nanoseconds between garbage 3512 // collections. If we go this long without a garbage collection, one 3513 // is forced to run. 3514 // 3515 // This is a variable for testing purposes. It normally doesn't change. 3516 var forcegcperiod int64 = 2 * 60 * 1e9 3517 3518 // Always runs without a P, so write barriers are not allowed. 3519 // 3520 //go:nowritebarrierrec 3521 func sysmon() { 3522 // If a heap span goes unused for 5 minutes after a garbage collection, 3523 // we hand it back to the operating system. 3524 scavengelimit := int64(5 * 60 * 1e9) 3525 3526 if debug.scavenge > 0 { 3527 // Scavenge-a-lot for testing. 3528 forcegcperiod = 10 * 1e6 3529 scavengelimit = 20 * 1e6 3530 } 3531 3532 lastscavenge := nanotime() 3533 nscavenge := 0 3534 3535 lasttrace := int64(0) 3536 idle := 0 // how many cycles in succession we have not woken anybody up 3537 delay := uint32(0) 3538 for { 3539 if idle == 0 { // start with 20us sleep... 3540 delay = 20 3541 } else if idle > 50 { // start doubling the sleep after 1ms... 3542 delay *= 2 3543 } 3544 if delay > 10*1000 { // up to 10ms 3545 delay = 10 * 1000 3546 } 3547 usleep(delay) 3548 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic 3549 lock(&sched.lock) 3550 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 3551 atomic.Store(&sched.sysmonwait, 1) 3552 unlock(&sched.lock) 3553 // Make wake-up period small enough 3554 // for the sampling to be correct.
3555 maxsleep := forcegcperiod / 2 3556 if scavengelimit < forcegcperiod { 3557 maxsleep = scavengelimit / 2 3558 } 3559 notetsleep(&sched.sysmonnote, maxsleep) 3560 lock(&sched.lock) 3561 atomic.Store(&sched.sysmonwait, 0) 3562 noteclear(&sched.sysmonnote) 3563 idle = 0 3564 delay = 20 3565 } 3566 unlock(&sched.lock) 3567 } 3568 // poll network if not polled for more than 10ms 3569 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 3570 now := nanotime() 3571 unixnow := unixnanotime() 3572 if lastpoll != 0 && lastpoll+10*1000*1000 < now { 3573 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 3574 gp := netpoll(false) // non-blocking - returns list of goroutines 3575 if gp != nil { 3576 // Need to decrement number of idle locked M's 3577 // (pretending that one more is running) before injectglist. 3578 // Otherwise it can lead to the following situation: 3579 // injectglist grabs all P's but before it starts M's to run the P's, 3580 // another M returns from syscall, finishes running its G, 3581 // observes that there is no work to do and no other running M's 3582 // and reports deadlock. 3583 incidlelocked(-1) 3584 injectglist(gp) 3585 incidlelocked(1) 3586 } 3587 } 3588 // retake P's blocked in syscalls 3589 // and preempt long running G's 3590 if retake(now) != 0 { 3591 idle = 0 3592 } else { 3593 idle++ 3594 } 3595 // check if we need to force a GC 3596 lastgc := int64(atomic.Load64(&memstats.last_gc)) 3597 if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 { 3598 lock(&forcegc.lock) 3599 forcegc.idle = 0 3600 forcegc.g.schedlink = 0 3601 injectglist(forcegc.g) 3602 unlock(&forcegc.lock) 3603 } 3604 // scavenge heap once in a while 3605 if lastscavenge+scavengelimit/2 < now { 3606 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3607 lastscavenge = now 3608 nscavenge++ 3609 } 3610 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 3611 lasttrace = now 3612 schedtrace(debug.scheddetail > 0) 3613 } 3614 } 3615 } 3616 3617 var pdesc [_MaxGomaxprocs]struct { 3618 schedtick uint32 3619 schedwhen int64 3620 syscalltick uint32 3621 syscallwhen int64 3622 } 3623 3624 // forcePreemptNS is the time slice given to a G before it is 3625 // preempted. 3626 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3627 3628 func retake(now int64) uint32 { 3629 n := 0 3630 for i := int32(0); i < gomaxprocs; i++ { 3631 _p_ := allp[i] 3632 if _p_ == nil { 3633 continue 3634 } 3635 pd := &pdesc[i] 3636 s := _p_.status 3637 if s == _Psyscall { 3638 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3639 t := int64(_p_.syscalltick) 3640 if int64(pd.syscalltick) != t { 3641 pd.syscalltick = uint32(t) 3642 pd.syscallwhen = now 3643 continue 3644 } 3645 // On the one hand we don't want to retake Ps if there is no other work to do, 3646 // but on the other hand we want to retake them eventually 3647 // because they can prevent the sysmon thread from deep sleep. 3648 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3649 continue 3650 } 3651 // Need to decrement number of idle locked M's 3652 // (pretending that one more is running) before the CAS. 3653 // Otherwise the M from which we retake can exit the syscall, 3654 // increment nmidle and report deadlock. 
3655 incidlelocked(-1)
3656 if atomic.Cas(&_p_.status, s, _Pidle) {
3657 if trace.enabled {
3658 traceGoSysBlock(_p_)
3659 traceProcStop(_p_)
3660 }
3661 n++
3662 _p_.syscalltick++
3663 handoffp(_p_)
3664 }
3665 incidlelocked(1)
3666 } else if s == _Prunning {
3667 // Preempt G if it's running for too long.
3668 t := int64(_p_.schedtick)
3669 if int64(pd.schedtick) != t {
3670 pd.schedtick = uint32(t)
3671 pd.schedwhen = now
3672 continue
3673 }
3674 if pd.schedwhen+forcePreemptNS > now {
3675 continue
3676 }
3677 preemptone(_p_)
3678 }
3679 }
3680 return uint32(n)
3681 }
3682 
3683 // Tell all goroutines that they have been preempted and that they should stop.
3684 // This function is purely best-effort. It can fail to inform a goroutine if a
3685 // processor just started running it.
3686 // No locks need to be held.
3687 // Returns true if preemption request was issued to at least one goroutine.
3688 func preemptall() bool {
3689 res := false
3690 for i := int32(0); i < gomaxprocs; i++ {
3691 _p_ := allp[i]
3692 if _p_ == nil || _p_.status != _Prunning {
3693 continue
3694 }
3695 if preemptone(_p_) {
3696 res = true
3697 }
3698 }
3699 return res
3700 }
3701 
3702 // Tell the goroutine running on processor P to stop.
3703 // This function is purely best-effort. It can incorrectly fail to inform the
3704 // goroutine. It can inform the wrong goroutine. Even if it informs the
3705 // correct goroutine, that goroutine might ignore the request if it is
3706 // simultaneously executing newstack.
3707 // No lock needs to be held.
3708 // Returns true if preemption request was issued.
3709 // The actual preemption will happen at some point in the future
3710 // and will be indicated by gp->status no longer being
3711 // _Grunning.
3712 func preemptone(_p_ *p) bool {
3713 mp := _p_.m.ptr()
3714 if mp == nil || mp == getg().m {
3715 return false
3716 }
3717 gp := mp.curg
3718 if gp == nil || gp == mp.g0 {
3719 return false
3720 }
3721 
3722 gp.preempt = true
3723 
3724 // Every call in a goroutine checks for stack overflow by
3725 // comparing the current stack pointer to gp->stackguard0.
3726 // Setting gp->stackguard0 to StackPreempt folds
3727 // preemption into the normal stack overflow check.
3728 gp.stackguard0 = stackPreempt
3729 return true
3730 }
3731 
3732 var starttime int64
3733 
3734 func schedtrace(detailed bool) {
3735 now := nanotime()
3736 if starttime == 0 {
3737 starttime = now
3738 }
3739 
3740 lock(&sched.lock)
3741 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
3742 if detailed {
3743 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
3744 }
3745 // We must be careful while reading data from P's, M's and G's.
3746 // Even if we hold schedlock, most data can be changed concurrently.
3747 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
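// That is why the loops below first snapshot pointers and counters into
// locals (mp, h, t, ...) and only then print them; every individual read
// may race, and the trace only needs to be approximately consistent.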
3748 for i := int32(0); i < gomaxprocs; i++ { 3749 _p_ := allp[i] 3750 if _p_ == nil { 3751 continue 3752 } 3753 mp := _p_.m.ptr() 3754 h := atomic.Load(&_p_.runqhead) 3755 t := atomic.Load(&_p_.runqtail) 3756 if detailed { 3757 id := int32(-1) 3758 if mp != nil { 3759 id = mp.id 3760 } 3761 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 3762 } else { 3763 // In non-detailed mode format lengths of per-P run queues as: 3764 // [len1 len2 len3 len4] 3765 print(" ") 3766 if i == 0 { 3767 print("[") 3768 } 3769 print(t - h) 3770 if i == gomaxprocs-1 { 3771 print("]\n") 3772 } 3773 } 3774 } 3775 3776 if !detailed { 3777 unlock(&sched.lock) 3778 return 3779 } 3780 3781 for mp := allm; mp != nil; mp = mp.alllink { 3782 _p_ := mp.p.ptr() 3783 gp := mp.curg 3784 lockedg := mp.lockedg 3785 id1 := int32(-1) 3786 if _p_ != nil { 3787 id1 = _p_.id 3788 } 3789 id2 := int64(-1) 3790 if gp != nil { 3791 id2 = gp.goid 3792 } 3793 id3 := int64(-1) 3794 if lockedg != nil { 3795 id3 = lockedg.goid 3796 } 3797 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n") 3798 } 3799 3800 lock(&allglock) 3801 for gi := 0; gi < len(allgs); gi++ { 3802 gp := allgs[gi] 3803 mp := gp.m 3804 lockedm := gp.lockedm 3805 id1 := int32(-1) 3806 if mp != nil { 3807 id1 = mp.id 3808 } 3809 id2 := int32(-1) 3810 if lockedm != nil { 3811 id2 = lockedm.id 3812 } 3813 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 3814 } 3815 unlock(&allglock) 3816 unlock(&sched.lock) 3817 } 3818 3819 // Put mp on midle list. 3820 // Sched must be locked. 3821 // May run during STW, so write barriers are not allowed. 3822 //go:nowritebarrier 3823 func mput(mp *m) { 3824 mp.schedlink = sched.midle 3825 sched.midle.set(mp) 3826 sched.nmidle++ 3827 checkdead() 3828 } 3829 3830 // Try to get an m from midle list. 3831 // Sched must be locked. 3832 // May run during STW, so write barriers are not allowed. 3833 //go:nowritebarrier 3834 func mget() *m { 3835 mp := sched.midle.ptr() 3836 if mp != nil { 3837 sched.midle = mp.schedlink 3838 sched.nmidle-- 3839 } 3840 return mp 3841 } 3842 3843 // Put gp on the global runnable queue. 3844 // Sched must be locked. 3845 // May run during STW, so write barriers are not allowed. 3846 //go:nowritebarrier 3847 func globrunqput(gp *g) { 3848 gp.schedlink = 0 3849 if sched.runqtail != 0 { 3850 sched.runqtail.ptr().schedlink.set(gp) 3851 } else { 3852 sched.runqhead.set(gp) 3853 } 3854 sched.runqtail.set(gp) 3855 sched.runqsize++ 3856 } 3857 3858 // Put gp at the head of the global runnable queue. 3859 // Sched must be locked. 3860 // May run during STW, so write barriers are not allowed. 3861 //go:nowritebarrier 3862 func globrunqputhead(gp *g) { 3863 gp.schedlink = sched.runqhead 3864 sched.runqhead.set(gp) 3865 if sched.runqtail == 0 { 3866 sched.runqtail.set(gp) 3867 } 3868 sched.runqsize++ 3869 } 3870 3871 // Put a batch of runnable goroutines on the global runnable queue. 3872 // Sched must be locked. 
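// The caller must have already linked ghead through gtail via the
// schedlink fields, with n the length of that chain (runqputslow
// below is one such caller).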
3873 func globrunqputbatch(ghead *g, gtail *g, n int32) {
3874 gtail.schedlink = 0
3875 if sched.runqtail != 0 {
3876 sched.runqtail.ptr().schedlink.set(ghead)
3877 } else {
3878 sched.runqhead.set(ghead)
3879 }
3880 sched.runqtail.set(gtail)
3881 sched.runqsize += n
3882 }
3883 
3884 // Try to get a batch of G's from the global runnable queue.
3885 // Sched must be locked.
3886 func globrunqget(_p_ *p, max int32) *g {
3887 if sched.runqsize == 0 {
3888 return nil
3889 }
3890 
3891 n := sched.runqsize/gomaxprocs + 1
3892 if n > sched.runqsize {
3893 n = sched.runqsize
3894 }
3895 if max > 0 && n > max {
3896 n = max
3897 }
3898 if n > int32(len(_p_.runq))/2 {
3899 n = int32(len(_p_.runq)) / 2
3900 }
3901 
3902 sched.runqsize -= n
3903 if sched.runqsize == 0 {
3904 sched.runqtail = 0
3905 }
3906 
3907 gp := sched.runqhead.ptr()
3908 sched.runqhead = gp.schedlink
3909 n--
3910 for ; n > 0; n-- {
3911 gp1 := sched.runqhead.ptr()
3912 sched.runqhead = gp1.schedlink
3913 runqput(_p_, gp1, false)
3914 }
3915 return gp
3916 }
3917 
3918 // Put p on the _Pidle list.
3919 // Sched must be locked.
3920 // May run during STW, so write barriers are not allowed.
3921 //go:nowritebarrier
3922 func pidleput(_p_ *p) {
3923 if !runqempty(_p_) {
3924 throw("pidleput: P has non-empty run queue")
3925 }
3926 _p_.link = sched.pidle
3927 sched.pidle.set(_p_)
3928 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
3929 }
3930 
3931 // Try to get a p from the _Pidle list.
3932 // Sched must be locked.
3933 // May run during STW, so write barriers are not allowed.
3934 //go:nowritebarrier
3935 func pidleget() *p {
3936 _p_ := sched.pidle.ptr()
3937 if _p_ != nil {
3938 sched.pidle = _p_.link
3939 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
3940 }
3941 return _p_
3942 }
3943 
3944 // runqempty returns true if _p_ has no Gs on its local run queue.
3945 // It never returns true spuriously.
3946 func runqempty(_p_ *p) bool {
3947 // Defend against a race where 1) _p_ has G1 in runnext but runqhead == runqtail,
3948 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runnext.
3949 // Simply observing that runqhead == runqtail and then observing that runnext == nil
3950 // does not mean the queue is empty.
3951 for {
3952 head := atomic.Load(&_p_.runqhead)
3953 tail := atomic.Load(&_p_.runqtail)
3954 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
3955 if tail == atomic.Load(&_p_.runqtail) {
3956 return head == tail && runnext == 0
3957 }
3958 }
3959 }
3960 
3961 // To shake out latent assumptions about scheduling order,
3962 // we introduce some randomness into scheduling decisions
3963 // when running with the race detector.
3964 // The need for this was made obvious by changing the
3965 // (deterministic) scheduling order in Go 1.5 and breaking
3966 // many poorly-written tests.
3967 // With the randomness here, as long as the tests pass
3968 // consistently with -race, they shouldn't have latent scheduling
3969 // assumptions.
3970 const randomizeScheduler = raceenabled
3971 
3972 // runqput tries to put g on the local runnable queue.
3973 // If next is false, runqput adds g to the tail of the runnable queue.
3974 // If next is true, runqput puts g in the _p_.runnext slot.
3975 // If the run queue is full, runqput puts g on the global queue.
3976 // Executed only by the owner P.
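// The local queue is a fixed-size ring (len(_p_.runq) slots) indexed by
// the free-running counters runqhead and runqtail taken modulo the ring
// size, so only the difference runqtail-runqhead is meaningful.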
3977 func runqput(_p_ *p, gp *g, next bool) { 3978 if randomizeScheduler && next && fastrand1()%2 == 0 { 3979 next = false 3980 } 3981 3982 if next { 3983 retryNext: 3984 oldnext := _p_.runnext 3985 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 3986 goto retryNext 3987 } 3988 if oldnext == 0 { 3989 return 3990 } 3991 // Kick the old runnext out to the regular run queue. 3992 gp = oldnext.ptr() 3993 } 3994 3995 retry: 3996 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 3997 t := _p_.runqtail 3998 if t-h < uint32(len(_p_.runq)) { 3999 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4000 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4001 return 4002 } 4003 if runqputslow(_p_, gp, h, t) { 4004 return 4005 } 4006 // the queue is not full, now the put above must succeed 4007 goto retry 4008 } 4009 4010 // Put g and a batch of work from local runnable queue on global queue. 4011 // Executed only by the owner P. 4012 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4013 var batch [len(_p_.runq)/2 + 1]*g 4014 4015 // First, grab a batch from local queue. 4016 n := t - h 4017 n = n / 2 4018 if n != uint32(len(_p_.runq)/2) { 4019 throw("runqputslow: queue is not full") 4020 } 4021 for i := uint32(0); i < n; i++ { 4022 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 4023 } 4024 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4025 return false 4026 } 4027 batch[n] = gp 4028 4029 if randomizeScheduler { 4030 for i := uint32(1); i <= n; i++ { 4031 j := fastrand1() % (i + 1) 4032 batch[i], batch[j] = batch[j], batch[i] 4033 } 4034 } 4035 4036 // Link the goroutines. 4037 for i := uint32(0); i < n; i++ { 4038 batch[i].schedlink.set(batch[i+1]) 4039 } 4040 4041 // Now put the batch on global queue. 4042 lock(&sched.lock) 4043 globrunqputbatch(batch[0], batch[n], int32(n+1)) 4044 unlock(&sched.lock) 4045 return true 4046 } 4047 4048 // Get g from local runnable queue. 4049 // If inheritTime is true, gp should inherit the remaining time in the 4050 // current time slice. Otherwise, it should start a new time slice. 4051 // Executed only by the owner P. 4052 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4053 // If there's a runnext, it's the next G to run. 4054 for { 4055 next := _p_.runnext 4056 if next == 0 { 4057 break 4058 } 4059 if _p_.runnext.cas(next, 0) { 4060 return next.ptr(), true 4061 } 4062 } 4063 4064 for { 4065 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4066 t := _p_.runqtail 4067 if t == h { 4068 return nil, false 4069 } 4070 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4071 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4072 return gp, false 4073 } 4074 } 4075 } 4076 4077 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4078 // Batch is a ring buffer starting at batchHead. 4079 // Returns number of grabbed goroutines. 4080 // Can be executed by any P. 4081 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4082 for { 4083 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4084 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4085 n := t - h 4086 n = n - n/2 4087 if n == 0 { 4088 if stealRunNextG { 4089 // Try to steal from _p_.runnext. 4090 if next := _p_.runnext; next != 0 { 4091 // Sleep to ensure that _p_ isn't about to run the g we 4092 // are about to steal. 
4093 // The important use case here is when the g running on _p_ 4094 // ready()s another g and then almost immediately blocks. 4095 // Instead of stealing runnext in this window, back off 4096 // to give _p_ a chance to schedule runnext. This will avoid 4097 // thrashing gs between different Ps. 4098 // A sync chan send/recv takes ~50ns as of time of writing, 4099 // so 3us gives ~50x overshoot. 4100 if GOOS != "windows" { 4101 usleep(3) 4102 } else { 4103 // On windows system timer granularity is 1-15ms, 4104 // which is way too much for this optimization. 4105 // So just yield. 4106 osyield() 4107 } 4108 if !_p_.runnext.cas(next, 0) { 4109 continue 4110 } 4111 batch[batchHead%uint32(len(batch))] = next 4112 return 1 4113 } 4114 } 4115 return 0 4116 } 4117 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4118 continue 4119 } 4120 for i := uint32(0); i < n; i++ { 4121 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4122 batch[(batchHead+i)%uint32(len(batch))] = g 4123 } 4124 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4125 return n 4126 } 4127 } 4128 } 4129 4130 // Steal half of elements from local runnable queue of p2 4131 // and put onto local runnable queue of p. 4132 // Returns one of the stolen elements (or nil if failed). 4133 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4134 t := _p_.runqtail 4135 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4136 if n == 0 { 4137 return nil 4138 } 4139 n-- 4140 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4141 if n == 0 { 4142 return gp 4143 } 4144 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4145 if t-h+n >= uint32(len(_p_.runq)) { 4146 throw("runqsteal: runq overflow") 4147 } 4148 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4149 return gp 4150 } 4151 4152 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4153 func setMaxThreads(in int) (out int) { 4154 lock(&sched.lock) 4155 out = int(sched.maxmcount) 4156 sched.maxmcount = int32(in) 4157 checkmcount() 4158 unlock(&sched.lock) 4159 return 4160 } 4161 4162 func haveexperiment(name string) bool { 4163 if name == "framepointer" { 4164 return framepointer_enabled // set by linker 4165 } 4166 x := sys.Goexperiment 4167 for x != "" { 4168 xname := "" 4169 i := index(x, ",") 4170 if i < 0 { 4171 xname, x = x, "" 4172 } else { 4173 xname, x = x[:i], x[i+1:] 4174 } 4175 if xname == name { 4176 return true 4177 } 4178 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 4179 return false 4180 } 4181 } 4182 return false 4183 } 4184 4185 //go:nosplit 4186 func procPin() int { 4187 _g_ := getg() 4188 mp := _g_.m 4189 4190 mp.locks++ 4191 return int(mp.p.ptr().id) 4192 } 4193 4194 //go:nosplit 4195 func procUnpin() { 4196 _g_ := getg() 4197 _g_.m.locks-- 4198 } 4199 4200 //go:linkname sync_runtime_procPin sync.runtime_procPin 4201 //go:nosplit 4202 func sync_runtime_procPin() int { 4203 return procPin() 4204 } 4205 4206 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 4207 //go:nosplit 4208 func sync_runtime_procUnpin() { 4209 procUnpin() 4210 } 4211 4212 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 4213 //go:nosplit 4214 func sync_atomic_runtime_procPin() int { 4215 return procPin() 4216 } 4217 4218 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 4219 //go:nosplit 4220 func sync_atomic_runtime_procUnpin() { 4221 procUnpin() 4222 } 4223 4224 // Active spinning for sync.Mutex. 
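// The two linknamed hooks below let sync.Mutex ask the scheduler whether
// a short spin is likely to pay off (canSpin) and perform the spin itself
// (doSpin) without exposing scheduler internals to package sync.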
4225 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4226 //go:nosplit
4227 func sync_runtime_canSpin(i int) bool {
4228 // sync.Mutex is cooperative, so we are conservative with spinning.
4229 // Spin only a few times and only if running on a multicore machine and
4230 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4231 // As opposed to runtime mutex we don't do passive spinning here,
4232 // because there can be work on the global runq or on other Ps.
4233 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4234 return false
4235 }
4236 if p := getg().m.p.ptr(); !runqempty(p) {
4237 return false
4238 }
4239 return true
4240 }
4241 
4242 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4243 //go:nosplit
4244 func sync_runtime_doSpin() {
4245 procyield(active_spin_cnt)
4246 }
4247 
4248 var stealOrder randomOrder
4249 
4250 // randomOrder/randomEnum are helper types for randomized work stealing.
4251 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4252 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4253 // are coprime, then the sequence of (i + X) % GOMAXPROCS gives the required enumeration.
4254 type randomOrder struct {
4255 count uint32
4256 coprimes []uint32
4257 }
4258 
4259 type randomEnum struct {
4260 i uint32
4261 count uint32
4262 pos uint32
4263 inc uint32
4264 }
4265 
4266 func (ord *randomOrder) reset(count uint32) {
4267 ord.count = count
4268 ord.coprimes = ord.coprimes[:0]
4269 for i := uint32(1); i <= count; i++ {
4270 if gcd(i, count) == 1 {
4271 ord.coprimes = append(ord.coprimes, i)
4272 }
4273 }
4274 }
4275 
4276 func (ord *randomOrder) start(i uint32) randomEnum {
4277 return randomEnum{
4278 count: ord.count,
4279 pos: i % ord.count,
4280 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4281 }
4282 }
4283 
4284 func (enum *randomEnum) done() bool {
4285 return enum.i == enum.count
4286 }
4287 
4288 func (enum *randomEnum) next() {
4289 enum.i++
4290 enum.pos = (enum.pos + enum.inc) % enum.count
4291 }
4292 
4293 func (enum *randomEnum) position() uint32 {
4294 return enum.pos
4295 }
4296 
4297 func gcd(a, b uint32) uint32 {
4298 for b != 0 {
4299 a, b = b, a%b
4300 }
4301 return a
4302 }
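For reference, the coprime-stride enumeration above can be reproduced outside the runtime in a few lines. The sketch below is illustrative only: package main, math/rand, and the fixed count of 6 are assumptions standing in for the runtime's fastrand and GOMAXPROCS, not part of proc.go.

// A minimal standalone sketch of the coprime-stride enumeration used by
// randomOrder/randomEnum above.
package main

import (
	"fmt"
	"math/rand"
)

// gcd mirrors the helper at the end of proc.go.
func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count = 6 // stands in for GOMAXPROCS

	// Collect every stride coprime with count, as randomOrder.reset does.
	var coprimes []uint32
	for i := uint32(1); i <= count; i++ {
		if gcd(i, count) == 1 {
			coprimes = append(coprimes, i)
		}
	}

	// Pick a random start position and stride, as randomOrder.start does
	// with a fastrand value. Because the stride is coprime with count,
	// stepping by it visits every index exactly once before repeating.
	pos := uint32(rand.Intn(count))
	inc := coprimes[rand.Intn(len(coprimes))]
	for i := 0; i < count; i++ {
		fmt.Println("visit P", pos)
		pos = (pos + inc) % count
	}
}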