github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code; however, it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and we would need to park it.
//    Also, it would destroy locality of computation as we want to preserve
//    dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
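// (Concretely, resetspinning below implements this rule: when it drops
// nmspinning to zero and idle Ps remain, it calls wakep to unpark a
// replacement spinning thread.)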
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to global run queue as we are not
// sloppy about thread unparking when submitting to global queue. Also see comments
// for nmspinning manipulation.

var (
	m0           m
	g0           g
	raceprocctx0 uintptr
)

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Record when the world started.
	runtimeInitTime = nanotime()

	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	main_init()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		gcStart(gcBackgroundMode, true)
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
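// For example, forcegchelper above parks on forcegc.lock via goparkunlock and,
// as its comment notes, is explicitly resumed by sysmon.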
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
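//
// funcPC relies on the layout of func values: the interface's data word points
// at a funcval whose first word is the code pointer, so skipping the type word
// and dereferencing twice yields the entry PC. Typical use in this file (see
// oneNewExtraM below):
//
//	gp.sched.pc = funcPC(goexit) + sys.PCQuantum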
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))

	// Grow GC rescan list if necessary.
	if len(allgs) > cap(work.rescan.list) {
		lock(&work.rescan.lock)
		l := work.rescan.list
		// Let append do the heavy lifting, but keep the
		// length the same.
		work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
		unlock(&work.rescan.lock)
	}
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)
	alginit()       // maps must not be used before this call
	typelinksinit() // uses maps
	itabsinit()

	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := int(ncpu)
	if procs > _MaxGomaxprocs {
		procs = _MaxGomaxprocs
	}
	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
		if n > _MaxGomaxprocs {
			n = _MaxGomaxprocs
		}
		procs = n
	}
	if procresize(int32(procs)) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}

func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	lock(&sched.lock)
	mp.id = sched.mcount
	sched.mcount++
	checkmcount()
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	if n > sched.nmidle+1 { // one M is currently running
		n = sched.nmidle + 1
	}
	unlock(&sched.lock)
	return n
}

func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	n -= sched.nmidle + 1 // one M is currently running
	unlock(&sched.lock)
	return n > 0
}

func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gcscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gcscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gcscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
		throw("casgstatus")
	}

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
		// But meanwhile just yield.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
	if newval == _Grunning && gp.gcscanvalid {
		// Run queueRescan on the system stack so it has more space.
		systemstack(func() { queueRescan(gp) })
	}
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g, gcw *gcWork) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.
	// It is important that the scan happens exactly once: if called twice,
	// the installation of stack barriers will detect the double scan and die.

	gp.gcscandone = false

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 10 * 1000
	var nextYield int64

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
loop:
	for i := 0; !gp.gcscandone; i++ {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true
			break loop

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					scanstack(gp, gcw)
					gp.gcscandone = true
				}
				restartg(gp)
				break loop
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}

		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			procyield(10)
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
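//
// The two are normally paired around an inspection of global state; a sketch of
// the calling convention only (not a call site in this file):
//
//	stopTheWorld("example")
//	// ... all other Ps are stopped here ...
//	startTheWorld()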
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, false)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
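	// stopwait accounting: the current P counts as stopped right away; the loop
	// below retakes Ps blocked in syscalls, the loop after it collects idle Ps,
	// and any P still running user code stops itself once preempted (the last
	// one to stop wakes stopnote).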
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}
	if sched.stopwait != 0 {
		throw("stopTheWorld: not stopped")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.status != _Pgcstop {
			throw("stopTheWorld: not stopped")
		}
	}
}

func mhelpgc() {
	_g_ := getg()
	_g_.m.helpgc = -1
}

func startTheWorldWithSema() {
	_g_ := getg()

	_g_.m.locks++        // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	injectglist(gp)
	add := needaddgcproc()
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// Called to start an M.
//go:nosplit
func mstart() {
	_g_ := getg()

	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		// Create an extra M for callbacks on threads not created by Go.
		if iscgo && !cgoHasExtraM {
			cgoHasExtraM = true
			newextram()
		}
		initsig(false)
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		_g_.m.helpgc = 0
		stopm()
	} else if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
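	// Ps that were running user code reach fn through preemption: they reenter
	// the scheduler, observe runSafePointFn != 0, and run it themselves; the
	// last one to finish wakes safePointNote (see runSafePointFn below).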
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
//
// This function is known to the compiler to inhibit the
// go:nowritebarrierrec annotation because it uses P for allocation.
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}
	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
	gp.gcRescan = -1
	// malg returns status as Gidle, change to Gsyscall before adding to allg
	// where GC will see it.
	casgstatus(gp, _Gidle, _Gsyscall)
	gp.m = mp
	mp.curg = gp
	mp.locked = _LockInternal
	mp.lockedg = gp
	gp.lockedm = mp
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram))
	}
	// put on allg for garbage collector
	allgadd(gp)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock()
	unminit()

	mnext := lockextra(true)
	mp.schedlink.set(mnext)

	setg(nil)

	// Commit the release of mp.
	unlockextra(mp)

	msigrestore(sigmask)
}

// A helper function for EnsureDropM.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var extram uintptr
var extraMWaiters uint32

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := atomic.Loaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				atomic.Xadd(&extraMWaiters, 1)
				incr = true
			}
			usleep(1)
			continue
		}
		if atomic.Casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}

// Create a new m. It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)
	mp.nextp.set(_p_)
	mp.sigmask = initSigmask
	if iscgo {
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(funcPC(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		return
	}
	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	if _g_.m.p != 0 {
		throw("stopm holding p")
	}
	if _g_.m.spinning {
		throw("stopm spinning")
	}

retry:
	lock(&sched.lock)
	mput(_g_.m)
	unlock(&sched.lock)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {
		gchelper()
		_g_.m.helpgc = 0
		_g_.m.mcache = nil
		_g_.m.p = 0
		goto retry
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

func mspinning() {
	// startm's caller incremented nmspinning. Set the new M's spinning.
	getg().m.spinning = true
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and startm will
// either decrement nmspinning or set m.spinning in the newly started M.
//go:nowritebarrier
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			if spinning {
				// The caller incremented nmspinning, but there are no idle Ps,
				// so it's okay to just undo the increment and give up.
				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
					throw("startm: negative nmspinning")
				}
			}
			return
		}
	}
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, _p_)
		return
	}
	if mp.spinning {
		throw("startm: m is spinning")
	}
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	mp.spinning = spinning
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//go:nowritebarrier
func handoffp(_p_ *p) {
	// handoffp must start an M in any situation where
	// findrunnable would return a G to run on _p_.

	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {
		startm(_p_, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
		startm(_p_, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
		startm(_p_, true)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting != 0 {
		_p_.status = _Pgcstop
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	pidleput(_p_)
	unlock(&sched.lock)
}

// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
func wakep() {
	// be conservative about spinning threads
	if !atomic.Cas(&sched.nmspinning, 0, 1) {
		return
	}
	startm(nil, true)
}

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	_g_ := getg()

	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
		throw("stoplockedm: inconsistent locking")
	}
	if _g_.m.p != 0 {
		// Schedule another M to run this p.
		_p_ := releasep()
		handoffp(_p_)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	status := readgstatus(_g_.m.lockedg)
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
		dumpgstatus(_g_)
		throw("stoplockedm: not runnable")
	}
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func startlockedm(gp *g) {
	_g_ := getg()

	mp := gp.lockedm
	if mp == _g_.m {
		throw("startlockedm: locked to me")
	}
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	_p_ := releasep()
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	_g_ := getg()

	if sched.gcwaiting == 0 {
		throw("gcstopm: not waiting for gc")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		// OK to just drop nmspinning here,
		// startTheWorld will unpark threads as necessary.
		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	_p_ := releasep()
	lock(&sched.lock)
	_p_.status = _Pgcstop
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
func execute(gp *g, inheritTime bool) {
	_g_ := getg()

	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + _StackGuard
	if !inheritTime {
		_g_.m.p.ptr().schedtick++
	}
	_g_.m.curg = gp
	gp.m = _g_.m

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if _g_.m.profilehz != hz {
		resetcpuprofiler(hz)
	}

	if trace.enabled {
		// GoSysExit has to happen when we have a P, but before GoStart.
		// So we emit it here.
		if gp.syscallsp != 0 && gp.sysblocktraced {
			traceGoSysExit(gp.sysexitticks)
		}
		traceGoStart()
	}

	gogo(&gp.sched)
}

// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
func findrunnable() (gp *g, inheritTime bool) {
	_g_ := getg()

	// The conditions here and in handoffp must agree: if
	// findrunnable would return a G to run, handoffp must start
	// an M.
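	// The search below proceeds roughly in order: local run queue, global run
	// queue, non-blocking netpoll, then stealing from other Ps; if all of that
	// fails, control falls through to "stop" to try idle-time GC marking or park.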
1844 1845 top: 1846 _p_ := _g_.m.p.ptr() 1847 if sched.gcwaiting != 0 { 1848 gcstopm() 1849 goto top 1850 } 1851 if _p_.runSafePointFn != 0 { 1852 runSafePointFn() 1853 } 1854 if fingwait && fingwake { 1855 if gp := wakefing(); gp != nil { 1856 ready(gp, 0, true) 1857 } 1858 } 1859 1860 // local runq 1861 if gp, inheritTime := runqget(_p_); gp != nil { 1862 return gp, inheritTime 1863 } 1864 1865 // global runq 1866 if sched.runqsize != 0 { 1867 lock(&sched.lock) 1868 gp := globrunqget(_p_, 0) 1869 unlock(&sched.lock) 1870 if gp != nil { 1871 return gp, false 1872 } 1873 } 1874 1875 // Poll network. 1876 // This netpoll is only an optimization before we resort to stealing. 1877 // We can safely skip it if there is a thread blocked in netpoll already. 1878 // If there is any kind of logical race with that blocked thread 1879 // (e.g. it has already returned from netpoll, but does not set lastpoll yet), 1880 // this thread will do blocking netpoll below anyway. 1881 if netpollinited() && sched.lastpoll != 0 { 1882 if gp := netpoll(false); gp != nil { // non-blocking 1883 // netpoll returns list of goroutines linked by schedlink. 1884 injectglist(gp.schedlink.ptr()) 1885 casgstatus(gp, _Gwaiting, _Grunnable) 1886 if trace.enabled { 1887 traceGoUnpark(gp, 0) 1888 } 1889 return gp, false 1890 } 1891 } 1892 1893 // Steal work from other P's. 1894 procs := uint32(gomaxprocs) 1895 if atomic.Load(&sched.npidle) == procs-1 { 1896 // Either GOMAXPROCS=1 or everybody, except for us, is idle already. 1897 // New work can appear from returning syscall/cgocall, network or timers. 1898 // None of those submit to local run queues, so there is no point in stealing. 1899 goto stop 1900 } 1901 // If number of spinning M's >= number of busy P's, block. 1902 // This is necessary to prevent excessive CPU consumption 1903 // when GOMAXPROCS>>1 but the program parallelism is low. 1904 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { // TODO: fast atomic 1905 goto stop 1906 } 1907 if !_g_.m.spinning { 1908 _g_.m.spinning = true 1909 atomic.Xadd(&sched.nmspinning, 1) 1910 } 1911 for i := 0; i < 4; i++ { 1912 for enum := stealOrder.start(fastrand1()); !enum.done(); enum.next() { 1913 if sched.gcwaiting != 0 { 1914 goto top 1915 } 1916 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 1917 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil { 1918 return gp, false 1919 } 1920 } 1921 } 1922 1923 stop: 1924 1925 // We have nothing to do. If we're in the GC mark phase, can 1926 // safely scan and blacken objects, and have work to do, run 1927 // idle-time marking rather than give up the P. 1928 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 1929 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 1930 gp := _p_.gcBgMarkWorker.ptr() 1931 casgstatus(gp, _Gwaiting, _Grunnable) 1932 if trace.enabled { 1933 traceGoUnpark(gp, 0) 1934 } 1935 return gp, false 1936 } 1937 1938 // return P and block 1939 lock(&sched.lock) 1940 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 1941 unlock(&sched.lock) 1942 goto top 1943 } 1944 if sched.runqsize != 0 { 1945 gp := globrunqget(_p_, 0) 1946 unlock(&sched.lock) 1947 return gp, false 1948 } 1949 if releasep() != _p_ { 1950 throw("findrunnable: wrong p") 1951 } 1952 pidleput(_p_) 1953 unlock(&sched.lock) 1954 1955 // Delicate dance: thread transitions from spinning to non-spinning state, 1956 // potentially concurrently with submission of new goroutines.
We must 1957 // drop nmspinning first and then check all per-P queues again (with 1958 // #StoreLoad memory barrier in between). If we do it the other way around, 1959 // another thread can submit a goroutine after we've checked all run queues 1960 // but before we drop nmspinning; as the result nobody will unpark a thread 1961 // to run the goroutine. 1962 // If we discover new work below, we need to restore m.spinning as a signal 1963 // for resetspinning to unpark a new worker thread (because there can be more 1964 // than one starving goroutine). However, if after discovering new work 1965 // we also observe no idle Ps, it is OK to just park the current thread: 1966 // the system is fully loaded so no spinning threads are required. 1967 // Also see "Worker thread parking/unparking" comment at the top of the file. 1968 wasSpinning := _g_.m.spinning 1969 if _g_.m.spinning { 1970 _g_.m.spinning = false 1971 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1972 throw("findrunnable: negative nmspinning") 1973 } 1974 } 1975 1976 // check all runqueues once again 1977 for i := 0; i < int(gomaxprocs); i++ { 1978 _p_ := allp[i] 1979 if _p_ != nil && !runqempty(_p_) { 1980 lock(&sched.lock) 1981 _p_ = pidleget() 1982 unlock(&sched.lock) 1983 if _p_ != nil { 1984 acquirep(_p_) 1985 if wasSpinning { 1986 _g_.m.spinning = true 1987 atomic.Xadd(&sched.nmspinning, 1) 1988 } 1989 goto top 1990 } 1991 break 1992 } 1993 } 1994 1995 // poll network 1996 if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 1997 if _g_.m.p != 0 { 1998 throw("findrunnable: netpoll with p") 1999 } 2000 if _g_.m.spinning { 2001 throw("findrunnable: netpoll with spinning") 2002 } 2003 gp := netpoll(true) // block until new work is available 2004 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2005 if gp != nil { 2006 lock(&sched.lock) 2007 _p_ = pidleget() 2008 unlock(&sched.lock) 2009 if _p_ != nil { 2010 acquirep(_p_) 2011 injectglist(gp.schedlink.ptr()) 2012 casgstatus(gp, _Gwaiting, _Grunnable) 2013 if trace.enabled { 2014 traceGoUnpark(gp, 0) 2015 } 2016 return gp, false 2017 } 2018 injectglist(gp) 2019 } 2020 } 2021 stopm() 2022 goto top 2023 } 2024 2025 func resetspinning() { 2026 _g_ := getg() 2027 if !_g_.m.spinning { 2028 throw("resetspinning: not a spinning m") 2029 } 2030 _g_.m.spinning = false 2031 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2032 if int32(nmspinning) < 0 { 2033 throw("findrunnable: negative nmspinning") 2034 } 2035 // M wakeup policy is deliberately somewhat conservative, so check if we 2036 // need to wakeup another P here. See "Worker thread parking/unparking" 2037 // comment at the top of the file for details. 2038 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2039 wakep() 2040 } 2041 } 2042 2043 // Injects the list of runnable G's into the scheduler. 2044 // Can run concurrently with GC. 2045 func injectglist(glist *g) { 2046 if glist == nil { 2047 return 2048 } 2049 if trace.enabled { 2050 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2051 traceGoUnpark(gp, 0) 2052 } 2053 } 2054 lock(&sched.lock) 2055 var n int 2056 for n = 0; glist != nil; n++ { 2057 gp := glist 2058 glist = gp.schedlink.ptr() 2059 casgstatus(gp, _Gwaiting, _Grunnable) 2060 globrunqput(gp) 2061 } 2062 unlock(&sched.lock) 2063 for ; n != 0 && sched.npidle != 0; n-- { 2064 startm(nil, false) 2065 } 2066 } 2067 2068 // One round of scheduler: find a runnable goroutine and execute it. 2069 // Never returns. 
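// Editorial summary of the function below (it mirrors the code, it is not a
// separate mechanism): one pass of schedule looks for work in this order:
//
//	trace reader           // only while tracing is enabled or shutting down
//	GC background worker   // only while gcBlackenEnabled != 0
//	global run queue       // every 61st schedtick, for fairness
//	local run queue        // runqget
//	findrunnable           // global queue, netpoll, stealing; blocks until work arrives
//
// Whatever is found is handed to execute, which never returns.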
2070 func schedule() { 2071 _g_ := getg() 2072 2073 if _g_.m.locks != 0 { 2074 throw("schedule: holding locks") 2075 } 2076 2077 if _g_.m.lockedg != nil { 2078 stoplockedm() 2079 execute(_g_.m.lockedg, false) // Never returns. 2080 } 2081 2082 top: 2083 if sched.gcwaiting != 0 { 2084 gcstopm() 2085 goto top 2086 } 2087 if _g_.m.p.ptr().runSafePointFn != 0 { 2088 runSafePointFn() 2089 } 2090 2091 var gp *g 2092 var inheritTime bool 2093 if trace.enabled || trace.shutdown { 2094 gp = traceReader() 2095 if gp != nil { 2096 casgstatus(gp, _Gwaiting, _Grunnable) 2097 traceGoUnpark(gp, 0) 2098 } 2099 } 2100 if gp == nil && gcBlackenEnabled != 0 { 2101 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2102 } 2103 if gp == nil { 2104 // Check the global runnable queue once in a while to ensure fairness. 2105 // Otherwise two goroutines can completely occupy the local runqueue 2106 // by constantly respawning each other. 2107 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2108 lock(&sched.lock) 2109 gp = globrunqget(_g_.m.p.ptr(), 1) 2110 unlock(&sched.lock) 2111 } 2112 } 2113 if gp == nil { 2114 gp, inheritTime = runqget(_g_.m.p.ptr()) 2115 if gp != nil && _g_.m.spinning { 2116 throw("schedule: spinning with local work") 2117 } 2118 } 2119 if gp == nil { 2120 gp, inheritTime = findrunnable() // blocks until work is available 2121 } 2122 2123 // This thread is going to run a goroutine and is not spinning anymore, 2124 // so if it was marked as spinning we need to reset it now and potentially 2125 // start a new spinning M. 2126 if _g_.m.spinning { 2127 resetspinning() 2128 } 2129 2130 if gp.lockedm != nil { 2131 // Hands off own p to the locked m, 2132 // then blocks waiting for a new p. 2133 startlockedm(gp) 2134 goto top 2135 } 2136 2137 execute(gp, inheritTime) 2138 } 2139 2140 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2141 // Typically a caller sets gp's status away from Grunning and then 2142 // immediately calls dropg to finish the job. The caller is also responsible 2143 // for arranging that gp will be restarted using ready at an 2144 // appropriate time. After calling dropg and arranging for gp to be 2145 // readied later, the caller can do other work but eventually should 2146 // call schedule to restart the scheduling of goroutines on this m. 2147 func dropg() { 2148 _g_ := getg() 2149 2150 _g_.m.curg.m = nil 2151 _g_.m.curg = nil 2152 } 2153 2154 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2155 unlock((*mutex)(lock)) 2156 return true 2157 } 2158 2159 // park continuation on g0. 2160 func park_m(gp *g) { 2161 _g_ := getg() 2162 2163 if trace.enabled { 2164 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp) 2165 } 2166 2167 casgstatus(gp, _Grunning, _Gwaiting) 2168 dropg() 2169 2170 if _g_.m.waitunlockf != nil { 2171 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2172 ok := fn(gp, _g_.m.waitlock) 2173 _g_.m.waitunlockf = nil 2174 _g_.m.waitlock = nil 2175 if !ok { 2176 if trace.enabled { 2177 traceGoUnpark(gp, 2) 2178 } 2179 casgstatus(gp, _Gwaiting, _Grunnable) 2180 execute(gp, true) // Schedule it back, never returns. 
2181 } 2182 } 2183 schedule() 2184 } 2185 2186 func goschedImpl(gp *g) { 2187 status := readgstatus(gp) 2188 if status&^_Gscan != _Grunning { 2189 dumpgstatus(gp) 2190 throw("bad g status") 2191 } 2192 casgstatus(gp, _Grunning, _Grunnable) 2193 dropg() 2194 lock(&sched.lock) 2195 globrunqput(gp) 2196 unlock(&sched.lock) 2197 2198 schedule() 2199 } 2200 2201 // Gosched continuation on g0. 2202 func gosched_m(gp *g) { 2203 if trace.enabled { 2204 traceGoSched() 2205 } 2206 goschedImpl(gp) 2207 } 2208 2209 func gopreempt_m(gp *g) { 2210 if trace.enabled { 2211 traceGoPreempt() 2212 } 2213 goschedImpl(gp) 2214 } 2215 2216 // Finishes execution of the current goroutine. 2217 func goexit1() { 2218 if raceenabled { 2219 racegoend() 2220 } 2221 if trace.enabled { 2222 traceGoEnd() 2223 } 2224 mcall(goexit0) 2225 } 2226 2227 // goexit continuation on g0. 2228 func goexit0(gp *g) { 2229 _g_ := getg() 2230 2231 casgstatus(gp, _Grunning, _Gdead) 2232 if isSystemGoroutine(gp) { 2233 atomic.Xadd(&sched.ngsys, -1) 2234 } 2235 gp.m = nil 2236 gp.lockedm = nil 2237 _g_.m.lockedg = nil 2238 gp.paniconfault = false 2239 gp._defer = nil // should be true already but just in case. 2240 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2241 gp.writebuf = nil 2242 gp.waitreason = "" 2243 gp.param = nil 2244 2245 // Note that gp's stack scan is now "valid" because it has no 2246 // stack. We could dequeueRescan, but that takes a lock and 2247 // isn't really necessary. 2248 gp.gcscanvalid = true 2249 dropg() 2250 2251 if _g_.m.locked&^_LockExternal != 0 { 2252 print("invalid m->locked = ", _g_.m.locked, "\n") 2253 throw("internal lockOSThread error") 2254 } 2255 _g_.m.locked = 0 2256 gfput(_g_.m.p.ptr(), gp) 2257 schedule() 2258 } 2259 2260 //go:nosplit 2261 //go:nowritebarrier 2262 func save(pc, sp uintptr) { 2263 _g_ := getg() 2264 2265 _g_.sched.pc = pc 2266 _g_.sched.sp = sp 2267 _g_.sched.lr = 0 2268 _g_.sched.ret = 0 2269 _g_.sched.ctxt = nil 2270 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2271 } 2272 2273 // The goroutine g is about to enter a system call. 2274 // Record that it's not using the cpu anymore. 2275 // This is called only from the go syscall library and cgocall, 2276 // not from the low-level system calls used by the runtime. 2277 // 2278 // Entersyscall cannot split the stack: the gosave must 2279 // make g->sched refer to the caller's stack segment, because 2280 // entersyscall is going to return immediately after. 2281 // 2282 // Nothing entersyscall calls can split the stack either. 2283 // We cannot safely move the stack during an active call to syscall, 2284 // because we do not know which of the uintptr arguments are 2285 // really pointers (back into the stack). 2286 // In practice, this means that we make the fast path run through 2287 // entersyscall doing no-split things, and the slow path has to use systemstack 2288 // to run bigger things on the system stack. 2289 // 2290 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2291 // saved SP and PC are restored. This is needed when exitsyscall will be called 2292 // from a function further up in the call stack than the parent, as g->syscallsp 2293 // must always point to a valid stack frame. entersyscall below is the normal 2294 // entry point for syscalls, which obtains the SP and PC from the caller. 2295 // 2296 // Syscall tracing: 2297 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 
2298 // If the syscall does not block, that is it, we do not emit any other events. 2299 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2300 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2301 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2302 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2303 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2304 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2305 // and we wait for the increment before emitting traceGoSysExit. 2306 // Note that the increment is done even if tracing is not enabled, 2307 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2308 // 2309 //go:nosplit 2310 func reentersyscall(pc, sp uintptr) { 2311 _g_ := getg() 2312 2313 // Disable preemption because during this function g is in Gsyscall status, 2314 // but can have inconsistent g->sched, do not let GC observe it. 2315 _g_.m.locks++ 2316 2317 // Entersyscall must not call any function that might split/grow the stack. 2318 // (See details in comment above.) 2319 // Catch calls that might, by replacing the stack guard with something that 2320 // will trip any stack check and leaving a flag to tell newstack to die. 2321 _g_.stackguard0 = stackPreempt 2322 _g_.throwsplit = true 2323 2324 // Leave SP around for GC and traceback. 2325 save(pc, sp) 2326 _g_.syscallsp = sp 2327 _g_.syscallpc = pc 2328 casgstatus(_g_, _Grunning, _Gsyscall) 2329 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2330 systemstack(func() { 2331 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2332 throw("entersyscall") 2333 }) 2334 } 2335 2336 if trace.enabled { 2337 systemstack(traceGoSysCall) 2338 // systemstack itself clobbers g.sched.{pc,sp} and we might 2339 // need them later when the G is genuinely blocked in a 2340 // syscall 2341 save(pc, sp) 2342 } 2343 2344 if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic 2345 systemstack(entersyscall_sysmon) 2346 save(pc, sp) 2347 } 2348 2349 if _g_.m.p.ptr().runSafePointFn != 0 { 2350 // runSafePointFn may stack split if run on this stack 2351 systemstack(runSafePointFn) 2352 save(pc, sp) 2353 } 2354 2355 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2356 _g_.sysblocktraced = true 2357 _g_.m.mcache = nil 2358 _g_.m.p.ptr().m = 0 2359 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2360 if sched.gcwaiting != 0 { 2361 systemstack(entersyscall_gcwait) 2362 save(pc, sp) 2363 } 2364 2365 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2366 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2367 // Morestack detects this case and throws. 2368 _g_.stackguard0 = stackPreempt 2369 _g_.m.locks-- 2370 } 2371 2372 // Standard syscall entry used by the go syscall library and normal cgo calls. 
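// Editorial sketch (the real wrappers live in the syscall package and in
// assembly, not in this file): a system call made through the syscall library
// is bracketed roughly as
//
//	entersyscall()                // G enters _Gsyscall, its P is left in _Psyscall
//	r1, r2, errno := rawTrap(...) // rawTrap is a hypothetical stand-in for the actual trap
//	exitsyscall()                 // try to re-acquire a P (exitsyscallfast), otherwise park
//
// If the call blocks long enough, sysmon's retake observes the P stuck in
// _Psyscall and hands it to another M via handoffp.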
2373 //go:nosplit 2374 func entersyscall(dummy int32) { 2375 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2376 } 2377 2378 func entersyscall_sysmon() { 2379 lock(&sched.lock) 2380 if atomic.Load(&sched.sysmonwait) != 0 { 2381 atomic.Store(&sched.sysmonwait, 0) 2382 notewakeup(&sched.sysmonnote) 2383 } 2384 unlock(&sched.lock) 2385 } 2386 2387 func entersyscall_gcwait() { 2388 _g_ := getg() 2389 _p_ := _g_.m.p.ptr() 2390 2391 lock(&sched.lock) 2392 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2393 if trace.enabled { 2394 traceGoSysBlock(_p_) 2395 traceProcStop(_p_) 2396 } 2397 _p_.syscalltick++ 2398 if sched.stopwait--; sched.stopwait == 0 { 2399 notewakeup(&sched.stopnote) 2400 } 2401 } 2402 unlock(&sched.lock) 2403 } 2404 2405 // The same as entersyscall(), but with a hint that the syscall is blocking. 2406 //go:nosplit 2407 func entersyscallblock(dummy int32) { 2408 _g_ := getg() 2409 2410 _g_.m.locks++ // see comment in entersyscall 2411 _g_.throwsplit = true 2412 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2413 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2414 _g_.sysblocktraced = true 2415 _g_.m.p.ptr().syscalltick++ 2416 2417 // Leave SP around for GC and traceback. 2418 pc := getcallerpc(unsafe.Pointer(&dummy)) 2419 sp := getcallersp(unsafe.Pointer(&dummy)) 2420 save(pc, sp) 2421 _g_.syscallsp = _g_.sched.sp 2422 _g_.syscallpc = _g_.sched.pc 2423 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2424 sp1 := sp 2425 sp2 := _g_.sched.sp 2426 sp3 := _g_.syscallsp 2427 systemstack(func() { 2428 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2429 throw("entersyscallblock") 2430 }) 2431 } 2432 casgstatus(_g_, _Grunning, _Gsyscall) 2433 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2434 systemstack(func() { 2435 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2436 throw("entersyscallblock") 2437 }) 2438 } 2439 2440 systemstack(entersyscallblock_handoff) 2441 2442 // Resave for traceback during blocked call. 2443 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2444 2445 _g_.m.locks-- 2446 } 2447 2448 func entersyscallblock_handoff() { 2449 if trace.enabled { 2450 traceGoSysCall() 2451 traceGoSysBlock(getg().m.p.ptr()) 2452 } 2453 handoffp(releasep()) 2454 } 2455 2456 // The goroutine g exited its system call. 2457 // Arrange for it to run on a cpu again. 2458 // This is called only from the go syscall library, not 2459 // from the low-level system calls used by the runtime. 2460 //go:nosplit 2461 func exitsyscall(dummy int32) { 2462 _g_ := getg() 2463 2464 _g_.m.locks++ // see comment in entersyscall 2465 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2466 // throw calls print which may try to grow the stack, 2467 // but throwsplit == true so the stack can not be grown; 2468 // use systemstack to avoid that possible problem. 
2469 systemstack(func() { 2470 throw("exitsyscall: syscall frame is no longer valid") 2471 }) 2472 } 2473 2474 _g_.waitsince = 0 2475 oldp := _g_.m.p.ptr() 2476 if exitsyscallfast() { 2477 if _g_.m.mcache == nil { 2478 throw("lost mcache") 2479 } 2480 if trace.enabled { 2481 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2482 systemstack(traceGoStart) 2483 } 2484 } 2485 // There's a cpu for us, so we can run. 2486 _g_.m.p.ptr().syscalltick++ 2487 // We need to cas the status and scan before resuming... 2488 casgstatus(_g_, _Gsyscall, _Grunning) 2489 2490 // Garbage collector isn't running (since we are), 2491 // so okay to clear syscallsp. 2492 _g_.syscallsp = 0 2493 _g_.m.locks-- 2494 if _g_.preempt { 2495 // restore the preemption request in case we've cleared it in newstack 2496 _g_.stackguard0 = stackPreempt 2497 } else { 2498 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2499 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2500 } 2501 _g_.throwsplit = false 2502 return 2503 } 2504 2505 _g_.sysexitticks = 0 2506 if trace.enabled { 2507 // Wait till traceGoSysBlock event is emitted. 2508 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2509 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2510 osyield() 2511 } 2512 // We can't trace syscall exit right now because we don't have a P. 2513 // Tracing code can invoke write barriers that cannot run without a P. 2514 // So instead we remember the syscall exit time and emit the event 2515 // in execute when we have a P. 2516 _g_.sysexitticks = cputicks() 2517 } 2518 2519 _g_.m.locks-- 2520 2521 // Call the scheduler. 2522 mcall(exitsyscall0) 2523 2524 if _g_.m.mcache == nil { 2525 throw("lost mcache") 2526 } 2527 2528 // Scheduler returned, so we're allowed to run now. 2529 // Delete the syscallsp information that we left for 2530 // the garbage collector during the system call. 2531 // Must wait until now because until gosched returns 2532 // we don't know for sure that the garbage collector 2533 // is not running. 2534 _g_.syscallsp = 0 2535 _g_.m.p.ptr().syscalltick++ 2536 _g_.throwsplit = false 2537 } 2538 2539 //go:nosplit 2540 func exitsyscallfast() bool { 2541 _g_ := getg() 2542 2543 // Freezetheworld sets stopwait but does not retake P's. 2544 if sched.stopwait == freezeStopWait { 2545 _g_.m.mcache = nil 2546 _g_.m.p = 0 2547 return false 2548 } 2549 2550 // Try to re-acquire the last P. 2551 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2552 // There's a cpu for us, so we can run. 2553 _g_.m.mcache = _g_.m.p.ptr().mcache 2554 _g_.m.p.ptr().m.set(_g_.m) 2555 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2556 if trace.enabled { 2557 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2558 // traceGoSysBlock for this syscall was already emitted, 2559 // but here we effectively retake the p from the new syscall running on the same p. 2560 systemstack(func() { 2561 // Denote blocking of the new syscall. 2562 traceGoSysBlock(_g_.m.p.ptr()) 2563 // Denote completion of the current syscall. 2564 traceGoSysExit(0) 2565 }) 2566 } 2567 _g_.m.p.ptr().syscalltick++ 2568 } 2569 return true 2570 } 2571 2572 // Try to get any other idle P. 
2573 oldp := _g_.m.p.ptr() 2574 _g_.m.mcache = nil 2575 _g_.m.p = 0 2576 if sched.pidle != 0 { 2577 var ok bool 2578 systemstack(func() { 2579 ok = exitsyscallfast_pidle() 2580 if ok && trace.enabled { 2581 if oldp != nil { 2582 // Wait till traceGoSysBlock event is emitted. 2583 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2584 for oldp.syscalltick == _g_.m.syscalltick { 2585 osyield() 2586 } 2587 } 2588 traceGoSysExit(0) 2589 } 2590 }) 2591 if ok { 2592 return true 2593 } 2594 } 2595 return false 2596 } 2597 2598 func exitsyscallfast_pidle() bool { 2599 lock(&sched.lock) 2600 _p_ := pidleget() 2601 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 2602 atomic.Store(&sched.sysmonwait, 0) 2603 notewakeup(&sched.sysmonnote) 2604 } 2605 unlock(&sched.lock) 2606 if _p_ != nil { 2607 acquirep(_p_) 2608 return true 2609 } 2610 return false 2611 } 2612 2613 // exitsyscall slow path on g0. 2614 // Failed to acquire P, enqueue gp as runnable. 2615 func exitsyscall0(gp *g) { 2616 _g_ := getg() 2617 2618 casgstatus(gp, _Gsyscall, _Grunnable) 2619 dropg() 2620 lock(&sched.lock) 2621 _p_ := pidleget() 2622 if _p_ == nil { 2623 globrunqput(gp) 2624 } else if atomic.Load(&sched.sysmonwait) != 0 { 2625 atomic.Store(&sched.sysmonwait, 0) 2626 notewakeup(&sched.sysmonnote) 2627 } 2628 unlock(&sched.lock) 2629 if _p_ != nil { 2630 acquirep(_p_) 2631 execute(gp, false) // Never returns. 2632 } 2633 if _g_.m.lockedg != nil { 2634 // Wait until another thread schedules gp and so m again. 2635 stoplockedm() 2636 execute(gp, false) // Never returns. 2637 } 2638 stopm() 2639 schedule() // Never returns. 2640 } 2641 2642 func beforefork() { 2643 gp := getg().m.curg 2644 2645 // Fork can hang if preempted with signals frequently enough (see issue 5517). 2646 // Ensure that we stay on the same M where we disable profiling. 2647 gp.m.locks++ 2648 if gp.m.profilehz != 0 { 2649 resetcpuprofiler(0) 2650 } 2651 2652 // This function is called before fork in syscall package. 2653 // Code between fork and exec must not allocate memory nor even try to grow stack. 2654 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2655 // runtime_AfterFork will undo this in parent process, but not in child. 2656 gp.stackguard0 = stackFork 2657 } 2658 2659 // Called from syscall package before fork. 2660 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2661 //go:nosplit 2662 func syscall_runtime_BeforeFork() { 2663 systemstack(beforefork) 2664 } 2665 2666 func afterfork() { 2667 gp := getg().m.curg 2668 2669 // See the comment in beforefork. 2670 gp.stackguard0 = gp.stack.lo + _StackGuard 2671 2672 hz := sched.profilehz 2673 if hz != 0 { 2674 resetcpuprofiler(hz) 2675 } 2676 gp.m.locks-- 2677 } 2678 2679 // Called from syscall package after fork in parent. 2680 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2681 //go:nosplit 2682 func syscall_runtime_AfterFork() { 2683 systemstack(afterfork) 2684 } 2685 2686 // Allocate a new g, with a stack big enough for stacksize bytes. 2687 func malg(stacksize int32) *g { 2688 newg := new(g) 2689 if stacksize >= 0 { 2690 stacksize = round2(_StackSystem + stacksize) 2691 systemstack(func() { 2692 newg.stack, newg.stkbar = stackalloc(uint32(stacksize)) 2693 }) 2694 newg.stackguard0 = newg.stack.lo + _StackGuard 2695 newg.stackguard1 = ^uintptr(0) 2696 newg.stackAlloc = uintptr(stacksize) 2697 } 2698 return newg 2699 } 2700 2701 // Create a new g running fn with siz bytes of arguments. 
2702 // Put it on the queue of g's waiting to run. 2703 // The compiler turns a go statement into a call to this. 2704 // Cannot split the stack because it assumes that the arguments 2705 // are available sequentially after &fn; they would not be 2706 // copied if a stack split occurred. 2707 //go:nosplit 2708 func newproc(siz int32, fn *funcval) { 2709 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2710 pc := getcallerpc(unsafe.Pointer(&siz)) 2711 systemstack(func() { 2712 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2713 }) 2714 } 2715 2716 // Create a new g running fn with narg bytes of arguments starting 2717 // at argp and returning nret bytes of results. callerpc is the 2718 // address of the go statement that created this. The new g is put 2719 // on the queue of g's waiting to run. 2720 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2721 _g_ := getg() 2722 2723 if fn == nil { 2724 _g_.m.throwing = -1 // do not dump full stacks 2725 throw("go of nil func value") 2726 } 2727 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2728 siz := narg + nret 2729 siz = (siz + 7) &^ 7 2730 2731 // We could allocate a larger initial stack if necessary. 2732 // Not worth it: this is almost always an error. 2733 // 4*sizeof(uintreg): extra space added below 2734 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2735 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2736 throw("newproc: function arguments too large for new goroutine") 2737 } 2738 2739 _p_ := _g_.m.p.ptr() 2740 newg := gfget(_p_) 2741 if newg == nil { 2742 newg = malg(_StackMin) 2743 casgstatus(newg, _Gidle, _Gdead) 2744 newg.gcRescan = -1 2745 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2746 } 2747 if newg.stack.hi == 0 { 2748 throw("newproc1: newg missing stack") 2749 } 2750 2751 if readgstatus(newg) != _Gdead { 2752 throw("newproc1: new g is not Gdead") 2753 } 2754 2755 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2756 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2757 sp := newg.stack.hi - totalSize 2758 spArg := sp 2759 if usesLR { 2760 // caller's LR 2761 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil 2762 prepGoExitFrame(sp) 2763 spArg += sys.MinFrameSize 2764 } 2765 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2766 2767 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2768 newg.sched.sp = sp 2769 newg.stktopsp = sp 2770 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 2771 newg.sched.g = guintptr(unsafe.Pointer(newg)) 2772 gostartcallfn(&newg.sched, fn) 2773 newg.gopc = callerpc 2774 newg.startpc = fn.fn 2775 if isSystemGoroutine(newg) { 2776 atomic.Xadd(&sched.ngsys, +1) 2777 } 2778 // The stack is dirty from the argument frame, so queue it for 2779 // scanning. Do this before setting it to runnable so we still 2780 // own the G. If we're recycling a G, it may already be on the 2781 // rescan list. 2782 if newg.gcRescan == -1 { 2783 queueRescan(newg) 2784 } else { 2785 // The recycled G is already on the rescan list. Just 2786 // mark the stack dirty. 
2787 newg.gcscanvalid = false 2788 } 2789 casgstatus(newg, _Gdead, _Grunnable) 2790 2791 if _p_.goidcache == _p_.goidcacheend { 2792 // Sched.goidgen is the last allocated id, 2793 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 2794 // At startup sched.goidgen=0, so main goroutine receives goid=1. 2795 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 2796 _p_.goidcache -= _GoidCacheBatch - 1 2797 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 2798 } 2799 newg.goid = int64(_p_.goidcache) 2800 _p_.goidcache++ 2801 if raceenabled { 2802 newg.racectx = racegostart(callerpc) 2803 } 2804 if trace.enabled { 2805 traceGoCreate(newg, newg.startpc) 2806 } 2807 runqput(_p_, newg, true) 2808 2809 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic 2810 wakep() 2811 } 2812 _g_.m.locks-- 2813 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 2814 _g_.stackguard0 = stackPreempt 2815 } 2816 return newg 2817 } 2818 2819 // Put on gfree list. 2820 // If local list is too long, transfer a batch to the global list. 2821 func gfput(_p_ *p, gp *g) { 2822 if readgstatus(gp) != _Gdead { 2823 throw("gfput: bad status (not Gdead)") 2824 } 2825 2826 stksize := gp.stackAlloc 2827 2828 if stksize != _FixedStack { 2829 // non-standard stack size - free it. 2830 stackfree(gp.stack, gp.stackAlloc) 2831 gp.stack.lo = 0 2832 gp.stack.hi = 0 2833 gp.stackguard0 = 0 2834 gp.stkbar = nil 2835 gp.stkbarPos = 0 2836 } else { 2837 // Reset stack barriers. 2838 gp.stkbar = gp.stkbar[:0] 2839 gp.stkbarPos = 0 2840 } 2841 2842 gp.schedlink.set(_p_.gfree) 2843 _p_.gfree = gp 2844 _p_.gfreecnt++ 2845 if _p_.gfreecnt >= 64 { 2846 lock(&sched.gflock) 2847 for _p_.gfreecnt >= 32 { 2848 _p_.gfreecnt-- 2849 gp = _p_.gfree 2850 _p_.gfree = gp.schedlink.ptr() 2851 if gp.stack.lo == 0 { 2852 gp.schedlink.set(sched.gfreeNoStack) 2853 sched.gfreeNoStack = gp 2854 } else { 2855 gp.schedlink.set(sched.gfreeStack) 2856 sched.gfreeStack = gp 2857 } 2858 sched.ngfree++ 2859 } 2860 unlock(&sched.gflock) 2861 } 2862 } 2863 2864 // Get from gfree list. 2865 // If local list is empty, grab a batch from global list. 2866 func gfget(_p_ *p) *g { 2867 retry: 2868 gp := _p_.gfree 2869 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 2870 lock(&sched.gflock) 2871 for _p_.gfreecnt < 32 { 2872 if sched.gfreeStack != nil { 2873 // Prefer Gs with stacks. 2874 gp = sched.gfreeStack 2875 sched.gfreeStack = gp.schedlink.ptr() 2876 } else if sched.gfreeNoStack != nil { 2877 gp = sched.gfreeNoStack 2878 sched.gfreeNoStack = gp.schedlink.ptr() 2879 } else { 2880 break 2881 } 2882 _p_.gfreecnt++ 2883 sched.ngfree-- 2884 gp.schedlink.set(_p_.gfree) 2885 _p_.gfree = gp 2886 } 2887 unlock(&sched.gflock) 2888 goto retry 2889 } 2890 if gp != nil { 2891 _p_.gfree = gp.schedlink.ptr() 2892 _p_.gfreecnt-- 2893 if gp.stack.lo == 0 { 2894 // Stack was deallocated in gfput. Allocate a new one. 2895 systemstack(func() { 2896 gp.stack, gp.stkbar = stackalloc(_FixedStack) 2897 }) 2898 gp.stackguard0 = gp.stack.lo + _StackGuard 2899 gp.stackAlloc = _FixedStack 2900 } else { 2901 if raceenabled { 2902 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2903 } 2904 if msanenabled { 2905 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2906 } 2907 } 2908 } 2909 return gp 2910 } 2911 2912 // Purge all cached G's from gfree list to the global list. 
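// Editorial worked example for the goid cache filled in newproc1 above, writing
// B for _GoidCacheBatch: starting from sched.goidgen = 0, the first refill does
//
//	goidcache    = Xadd64(&sched.goidgen, B)  // returns B
//	goidcache   -= B - 1                      // now 1
//	goidcacheend = goidcache + B              // 1 + B
//
// so that P hands out goids 1..B without touching sched.goidgen again, the next
// refilling P gets B+1..2B, and the main goroutine receives goid 1 as the
// comment above states.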
2913 func gfpurge(_p_ *p) { 2914 lock(&sched.gflock) 2915 for _p_.gfreecnt != 0 { 2916 _p_.gfreecnt-- 2917 gp := _p_.gfree 2918 _p_.gfree = gp.schedlink.ptr() 2919 if gp.stack.lo == 0 { 2920 gp.schedlink.set(sched.gfreeNoStack) 2921 sched.gfreeNoStack = gp 2922 } else { 2923 gp.schedlink.set(sched.gfreeStack) 2924 sched.gfreeStack = gp 2925 } 2926 sched.ngfree++ 2927 } 2928 unlock(&sched.gflock) 2929 } 2930 2931 // Breakpoint executes a breakpoint trap. 2932 func Breakpoint() { 2933 breakpoint() 2934 } 2935 2936 // dolockOSThread is called by LockOSThread and lockOSThread below 2937 // after they modify m.locked. Do not allow preemption during this call, 2938 // or else the m might be different in this function than in the caller. 2939 //go:nosplit 2940 func dolockOSThread() { 2941 _g_ := getg() 2942 _g_.m.lockedg = _g_ 2943 _g_.lockedm = _g_.m 2944 } 2945 2946 //go:nosplit 2947 2948 // LockOSThread wires the calling goroutine to its current operating system thread. 2949 // Until the calling goroutine exits or calls UnlockOSThread, it will always 2950 // execute in that thread, and no other goroutine can. 2951 func LockOSThread() { 2952 getg().m.locked |= _LockExternal 2953 dolockOSThread() 2954 } 2955 2956 //go:nosplit 2957 func lockOSThread() { 2958 getg().m.locked += _LockInternal 2959 dolockOSThread() 2960 } 2961 2962 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 2963 // after they update m->locked. Do not allow preemption during this call, 2964 // or else the m might be in different in this function than in the caller. 2965 //go:nosplit 2966 func dounlockOSThread() { 2967 _g_ := getg() 2968 if _g_.m.locked != 0 { 2969 return 2970 } 2971 _g_.m.lockedg = nil 2972 _g_.lockedm = nil 2973 } 2974 2975 //go:nosplit 2976 2977 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 2978 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 2979 func UnlockOSThread() { 2980 getg().m.locked &^= _LockExternal 2981 dounlockOSThread() 2982 } 2983 2984 //go:nosplit 2985 func unlockOSThread() { 2986 _g_ := getg() 2987 if _g_.m.locked < _LockInternal { 2988 systemstack(badunlockosthread) 2989 } 2990 _g_.m.locked -= _LockInternal 2991 dounlockOSThread() 2992 } 2993 2994 func badunlockosthread() { 2995 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 2996 } 2997 2998 func gcount() int32 { 2999 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 3000 for i := 0; ; i++ { 3001 _p_ := allp[i] 3002 if _p_ == nil { 3003 break 3004 } 3005 n -= _p_.gfreecnt 3006 } 3007 3008 // All these variables can be changed concurrently, so the result can be inconsistent. 3009 // But at least the current goroutine is running. 3010 if n < 1 { 3011 n = 1 3012 } 3013 return n 3014 } 3015 3016 func mcount() int32 { 3017 return sched.mcount 3018 } 3019 3020 var prof struct { 3021 lock uint32 3022 hz int32 3023 } 3024 3025 func _System() { _System() } 3026 func _ExternalCode() { _ExternalCode() } 3027 func _GC() { _GC() } 3028 3029 // Called if we receive a SIGPROF signal. 3030 // Called by the signal handler, may run during STW. 3031 //go:nowritebarrierrec 3032 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3033 if prof.hz == 0 { 3034 return 3035 } 3036 3037 // Profiling runs concurrently with GC, so it must not allocate. 3038 mp.mallocing++ 3039 3040 // Define that a "user g" is a user-created goroutine, and a "system g" 3041 // is one that is m->g0 or m->gsignal. 
3042 // 3043 // We might be interrupted for profiling halfway through a 3044 // goroutine switch. The switch involves updating three (or four) values: 3045 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3046 // because once it gets updated the new g is running. 3047 // 3048 // When switching from a user g to a system g, LR is not considered live, 3049 // so the update only affects g, SP, and PC. Since PC must be last, there 3050 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3051 // (2) both g and SP are updated, and (3) SP alone is updated. 3052 // If SP or g alone is updated, we can detect the partial transition by checking 3053 // whether the SP is within g's stack bounds. (We could also require that SP 3054 // be changed only after g, but the stack bounds check is needed by other 3055 // cases, so there is no need to impose an additional requirement.) 3056 // 3057 // There is one exceptional transition to a system g, not in ordinary execution. 3058 // When a signal arrives, the operating system starts the signal handler running 3059 // with an updated PC and SP. The g is updated last, at the beginning of the 3060 // handler. There are two reasons this is okay. First, until g is updated the 3061 // g and SP do not match, so the stack bounds check detects the partial transition. 3062 // Second, signal handlers currently run with signals disabled, so a profiling 3063 // signal cannot arrive during the handler. 3064 // 3065 // When switching from a system g to a user g, there are three possibilities. 3066 // 3067 // First, it may be that the g switch has no PC update, because the SP 3068 // either corresponds to a user g throughout (as in asmcgocall) 3069 // or because it has been arranged to look like a user g frame 3070 // (as in cgocallback_gofunc). In this case, since the entire 3071 // transition is a g+SP update, a partial transition updating just one of 3072 // those will be detected by the stack bounds check. 3073 // 3074 // Second, when returning from a signal handler, the PC and SP updates 3075 // are performed by the operating system in an atomic update, so the g 3076 // update must be done before them. The stack bounds check detects 3077 // the partial transition here, and (again) signal handlers run with signals 3078 // disabled, so a profiling signal cannot arrive then anyway. 3079 // 3080 // Third, the common case: it may be that the switch updates g, SP, and PC 3081 // separately. If the PC is within any of the functions that does this, 3082 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3083 // 3084 // There is another apparently viable approach, recorded here in case 3085 // the "PC within setsSP function" check turns out not to be usable. 3086 // It would be possible to delay the update of either g or SP until immediately 3087 // before the PC update instruction. Then, because of the stack bounds check, 3088 // the only problematic interrupt point is just before that PC update instruction, 3089 // and the sigprof handler can detect that instruction and simulate stepping past 3090 // it in order to reach a consistent state. On ARM, the update of g must be made 3091 // in two places (in R10 and also in a TLS slot), so the delayed update would 3092 // need to be the SP update. The sigprof handler must read the instruction at 3093 // the current PC and if it was the known instruction (for example, JMP BX or 3094 // MOV R2, PC), use that other register in place of the PC value. 
3095 // The biggest drawback to this solution is that it requires that we can tell 3096 // whether it's safe to read from the memory pointed at by PC. 3097 // In a correct program, we can test PC == nil and otherwise read, 3098 // but if a profiling signal happens at the instant that a program executes 3099 // a bad jump (before the program manages to handle the resulting fault) 3100 // the profiling handler could fault trying to read nonexistent memory. 3101 // 3102 // To recap, there are no constraints on the assembly being used for the 3103 // transition. We simply require that g and SP match and that the PC is not 3104 // in gogo. 3105 traceback := true 3106 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3107 traceback = false 3108 } 3109 var stk [maxCPUProfStack]uintptr 3110 var haveStackLock *g 3111 n := 0 3112 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3113 cgoOff := 0 3114 // Check cgoCallersUse to make sure that we are not 3115 // interrupting other code that is fiddling with 3116 // cgoCallers. We are running in a signal handler 3117 // with all signals blocked, so we don't have to worry 3118 // about any other code interrupting us. 3119 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3120 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3121 cgoOff++ 3122 } 3123 copy(stk[:], mp.cgoCallers[:cgoOff]) 3124 mp.cgoCallers[0] = 0 3125 } 3126 3127 // Collect Go stack that leads to the cgo call. 3128 if gcTryLockStackBarriers(mp.curg) { 3129 haveStackLock = mp.curg 3130 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3131 } 3132 } else if traceback { 3133 var flags uint = _TraceTrap 3134 if gp.m.curg != nil && gcTryLockStackBarriers(gp.m.curg) { 3135 // It's safe to traceback the user stack. 3136 haveStackLock = gp.m.curg 3137 flags |= _TraceJumpStack 3138 } 3139 // Traceback is safe if we're on the system stack (if 3140 // necessary, flags will stop it before switching to 3141 // the user stack), or if we locked the user stack. 3142 if gp != gp.m.curg || haveStackLock != nil { 3143 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags) 3144 } 3145 } 3146 if haveStackLock != nil { 3147 gcUnlockStackBarriers(haveStackLock) 3148 } 3149 3150 if n <= 0 { 3151 // Normal traceback is impossible or has failed. 3152 // See if it falls into several common cases. 3153 n = 0 3154 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3155 // Libcall, i.e. runtime syscall on windows. 3156 // Collect Go stack that leads to the call. 3157 if gcTryLockStackBarriers(mp.libcallg.ptr()) { 3158 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3159 gcUnlockStackBarriers(mp.libcallg.ptr()) 3160 } 3161 } 3162 if n == 0 { 3163 // If all of the above has failed, account it against abstract "System" or "GC". 3164 n = 2 3165 // "ExternalCode" is better than "etext". 3166 if pc > firstmoduledata.etext { 3167 pc = funcPC(_ExternalCode) + sys.PCQuantum 3168 } 3169 stk[0] = pc 3170 if mp.preemptoff != "" || mp.helpgc != 0 { 3171 stk[1] = funcPC(_GC) + sys.PCQuantum 3172 } else { 3173 stk[1] = funcPC(_System) + sys.PCQuantum 3174 } 3175 } 3176 } 3177 3178 if prof.hz != 0 { 3179 // Simple cas-lock to coordinate with setcpuprofilerate. 
3180 for !atomic.Cas(&prof.lock, 0, 1) { 3181 osyield() 3182 } 3183 if prof.hz != 0 { 3184 cpuprof.add(stk[:n]) 3185 } 3186 atomic.Store(&prof.lock, 0) 3187 } 3188 mp.mallocing-- 3189 } 3190 3191 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3192 // it tries to collect a traceback into sigprofCallers. 3193 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3194 var sigprofCallers cgoCallers 3195 var sigprofCallersUse uint32 3196 3197 // Called if we receive a SIGPROF signal on a non-Go thread. 3198 // When this is called, sigprofCallersUse will be non-zero. 3199 // g is nil, and what we can do is very limited. 3200 //go:nosplit 3201 //go:nowritebarrierrec 3202 func sigprofNonGo() { 3203 if prof.hz != 0 { 3204 n := 0 3205 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3206 n++ 3207 } 3208 3209 // Simple cas-lock to coordinate with setcpuprofilerate. 3210 if atomic.Cas(&prof.lock, 0, 1) { 3211 if prof.hz != 0 { 3212 cpuprof.addNonGo(sigprofCallers[:n]) 3213 } 3214 atomic.Store(&prof.lock, 0) 3215 } 3216 } 3217 3218 atomic.Store(&sigprofCallersUse, 0) 3219 } 3220 3221 // Reports whether a function will set the SP 3222 // to an absolute value. Important that 3223 // we don't traceback when these are at the bottom 3224 // of the stack since we can't be sure that we will 3225 // find the caller. 3226 // 3227 // If the function is not on the bottom of the stack 3228 // we assume that it will have set it up so that traceback will be consistent, 3229 // either by being a traceback terminating function 3230 // or putting one on the stack at the right offset. 3231 func setsSP(pc uintptr) bool { 3232 f := findfunc(pc) 3233 if f == nil { 3234 // couldn't find the function for this PC, 3235 // so assume the worst and stop traceback 3236 return true 3237 } 3238 switch f.entry { 3239 case gogoPC, systemstackPC, mcallPC, morestackPC: 3240 return true 3241 } 3242 return false 3243 } 3244 3245 // Arrange to call fn with a traceback hz times a second. 3246 func setcpuprofilerate_m(hz int32) { 3247 // Force sane arguments. 3248 if hz < 0 { 3249 hz = 0 3250 } 3251 3252 // Disable preemption, otherwise we can be rescheduled to another thread 3253 // that has profiling enabled. 3254 _g_ := getg() 3255 _g_.m.locks++ 3256 3257 // Stop profiler on this thread so that it is safe to lock prof. 3258 // if a profiling signal came in while we had prof locked, 3259 // it would deadlock. 3260 resetcpuprofiler(0) 3261 3262 for !atomic.Cas(&prof.lock, 0, 1) { 3263 osyield() 3264 } 3265 prof.hz = hz 3266 atomic.Store(&prof.lock, 0) 3267 3268 lock(&sched.lock) 3269 sched.profilehz = hz 3270 unlock(&sched.lock) 3271 3272 if hz != 0 { 3273 resetcpuprofiler(hz) 3274 } 3275 3276 _g_.m.locks-- 3277 } 3278 3279 // Change number of processors. The world is stopped, sched is locked. 3280 // gcworkbufs are not being modified by either the GC or 3281 // the write barrier code. 3282 // Returns list of Ps with local work, they need to be scheduled by the caller. 
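// Editorial note on how procresize below is reached: it always runs with the
// world stopped. Apart from runtime start-up, the usual trigger is user code
// changing the processor count, e.g.
//
//	prev := runtime.GOMAXPROCS(4) // world stops, procresize(4) runs, world restarts
//	defer runtime.GOMAXPROCS(prev)
//
// The Ps with local work that it returns are then rescheduled by the caller, as
// the comment above says.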
3283 func procresize(nprocs int32) *p { 3284 old := gomaxprocs 3285 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3286 throw("procresize: invalid arg") 3287 } 3288 if trace.enabled { 3289 traceGomaxprocs(nprocs) 3290 } 3291 3292 // update statistics 3293 now := nanotime() 3294 if sched.procresizetime != 0 { 3295 sched.totaltime += int64(old) * (now - sched.procresizetime) 3296 } 3297 sched.procresizetime = now 3298 3299 // initialize new P's 3300 for i := int32(0); i < nprocs; i++ { 3301 pp := allp[i] 3302 if pp == nil { 3303 pp = new(p) 3304 pp.id = i 3305 pp.status = _Pgcstop 3306 pp.sudogcache = pp.sudogbuf[:0] 3307 for i := range pp.deferpool { 3308 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3309 } 3310 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3311 } 3312 if pp.mcache == nil { 3313 if old == 0 && i == 0 { 3314 if getg().m.mcache == nil { 3315 throw("missing mcache?") 3316 } 3317 pp.mcache = getg().m.mcache // bootstrap 3318 } else { 3319 pp.mcache = allocmcache() 3320 } 3321 } 3322 if raceenabled && pp.racectx == 0 { 3323 if old == 0 && i == 0 { 3324 pp.racectx = raceprocctx0 3325 raceprocctx0 = 0 // bootstrap 3326 } else { 3327 pp.racectx = raceproccreate() 3328 } 3329 } 3330 } 3331 3332 // free unused P's 3333 for i := nprocs; i < old; i++ { 3334 p := allp[i] 3335 if trace.enabled { 3336 if p == getg().m.p.ptr() { 3337 // moving to p[0], pretend that we were descheduled 3338 // and then scheduled again to keep the trace sane. 3339 traceGoSched() 3340 traceProcStop(p) 3341 } 3342 } 3343 // move all runnable goroutines to the global queue 3344 for p.runqhead != p.runqtail { 3345 // pop from tail of local queue 3346 p.runqtail-- 3347 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3348 // push onto head of global queue 3349 globrunqputhead(gp) 3350 } 3351 if p.runnext != 0 { 3352 globrunqputhead(p.runnext.ptr()) 3353 p.runnext = 0 3354 } 3355 // if there's a background worker, make it runnable and put 3356 // it on the global queue so it can clean itself up 3357 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3358 casgstatus(gp, _Gwaiting, _Grunnable) 3359 if trace.enabled { 3360 traceGoUnpark(gp, 0) 3361 } 3362 globrunqput(gp) 3363 // This assignment doesn't race because the 3364 // world is stopped. 
3365 p.gcBgMarkWorker.set(nil) 3366 } 3367 for i := range p.sudogbuf { 3368 p.sudogbuf[i] = nil 3369 } 3370 p.sudogcache = p.sudogbuf[:0] 3371 for i := range p.deferpool { 3372 for j := range p.deferpoolbuf[i] { 3373 p.deferpoolbuf[i][j] = nil 3374 } 3375 p.deferpool[i] = p.deferpoolbuf[i][:0] 3376 } 3377 freemcache(p.mcache) 3378 p.mcache = nil 3379 gfpurge(p) 3380 traceProcFree(p) 3381 if raceenabled { 3382 raceprocdestroy(p.racectx) 3383 p.racectx = 0 3384 } 3385 p.status = _Pdead 3386 // can't free P itself because it can be referenced by an M in syscall 3387 } 3388 3389 _g_ := getg() 3390 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3391 // continue to use the current P 3392 _g_.m.p.ptr().status = _Prunning 3393 } else { 3394 // release the current P and acquire allp[0] 3395 if _g_.m.p != 0 { 3396 _g_.m.p.ptr().m = 0 3397 } 3398 _g_.m.p = 0 3399 _g_.m.mcache = nil 3400 p := allp[0] 3401 p.m = 0 3402 p.status = _Pidle 3403 acquirep(p) 3404 if trace.enabled { 3405 traceGoStart() 3406 } 3407 } 3408 var runnablePs *p 3409 for i := nprocs - 1; i >= 0; i-- { 3410 p := allp[i] 3411 if _g_.m.p.ptr() == p { 3412 continue 3413 } 3414 p.status = _Pidle 3415 if runqempty(p) { 3416 pidleput(p) 3417 } else { 3418 p.m.set(mget()) 3419 p.link.set(runnablePs) 3420 runnablePs = p 3421 } 3422 } 3423 stealOrder.reset(uint32(nprocs)) 3424 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3425 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3426 return runnablePs 3427 } 3428 3429 // Associate p and the current m. 3430 func acquirep(_p_ *p) { 3431 acquirep1(_p_) 3432 3433 // have p; write barriers now allowed 3434 _g_ := getg() 3435 _g_.m.mcache = _p_.mcache 3436 3437 if trace.enabled { 3438 traceProcStart() 3439 } 3440 } 3441 3442 // May run during STW, so write barriers are not allowed. 3443 //go:nowritebarrier 3444 func acquirep1(_p_ *p) { 3445 _g_ := getg() 3446 3447 if _g_.m.p != 0 || _g_.m.mcache != nil { 3448 throw("acquirep: already in go") 3449 } 3450 if _p_.m != 0 || _p_.status != _Pidle { 3451 id := int32(0) 3452 if _p_.m != 0 { 3453 id = _p_.m.ptr().id 3454 } 3455 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3456 throw("acquirep: invalid p state") 3457 } 3458 _g_.m.p.set(_p_) 3459 _p_.m.set(_g_.m) 3460 _p_.status = _Prunning 3461 } 3462 3463 // Disassociate p and the current m. 3464 func releasep() *p { 3465 _g_ := getg() 3466 3467 if _g_.m.p == 0 || _g_.m.mcache == nil { 3468 throw("releasep: invalid arg") 3469 } 3470 _p_ := _g_.m.p.ptr() 3471 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 3472 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 3473 throw("releasep: invalid p state") 3474 } 3475 if trace.enabled { 3476 traceProcStop(_g_.m.p.ptr()) 3477 } 3478 _g_.m.p = 0 3479 _g_.m.mcache = nil 3480 _p_.m = 0 3481 _p_.status = _Pidle 3482 return _p_ 3483 } 3484 3485 func incidlelocked(v int32) { 3486 lock(&sched.lock) 3487 sched.nmidlelocked += v 3488 if v > 0 { 3489 checkdead() 3490 } 3491 unlock(&sched.lock) 3492 } 3493 3494 // Check for deadlock situation. 3495 // The check is based on number of running M's, if 0 -> deadlock. 3496 func checkdead() { 3497 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 3498 // there are no running goroutines. The calling program is 3499 // assumed to be running. 
3500 if islibrary || isarchive { 3501 return 3502 } 3503 3504 // If we are dying because of a signal caught on an already idle thread, 3505 // freezetheworld will cause all running threads to block. 3506 // And runtime will essentially enter into deadlock state, 3507 // except that there is a thread that will call exit soon. 3508 if panicking > 0 { 3509 return 3510 } 3511 3512 // -1 for sysmon 3513 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 3514 if run > 0 { 3515 return 3516 } 3517 if run < 0 { 3518 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 3519 throw("checkdead: inconsistent counts") 3520 } 3521 3522 grunning := 0 3523 lock(&allglock) 3524 for i := 0; i < len(allgs); i++ { 3525 gp := allgs[i] 3526 if isSystemGoroutine(gp) { 3527 continue 3528 } 3529 s := readgstatus(gp) 3530 switch s &^ _Gscan { 3531 case _Gwaiting: 3532 grunning++ 3533 case _Grunnable, 3534 _Grunning, 3535 _Gsyscall: 3536 unlock(&allglock) 3537 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 3538 throw("checkdead: runnable g") 3539 } 3540 } 3541 unlock(&allglock) 3542 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 3543 throw("no goroutines (main called runtime.Goexit) - deadlock!") 3544 } 3545 3546 // Maybe jump time forward for playground. 3547 gp := timejump() 3548 if gp != nil { 3549 casgstatus(gp, _Gwaiting, _Grunnable) 3550 globrunqput(gp) 3551 _p_ := pidleget() 3552 if _p_ == nil { 3553 throw("checkdead: no p for timer") 3554 } 3555 mp := mget() 3556 if mp == nil { 3557 // There should always be a free M since 3558 // nothing is running. 3559 throw("checkdead: no m for timer") 3560 } 3561 mp.nextp.set(_p_) 3562 notewakeup(&mp.park) 3563 return 3564 } 3565 3566 getg().m.throwing = -1 // do not dump full stacks 3567 throw("all goroutines are asleep - deadlock!") 3568 } 3569 3570 // forcegcperiod is the maximum time in nanoseconds between garbage 3571 // collections. If we go this long without a garbage collection, one 3572 // is forced to run. 3573 // 3574 // This is a variable for testing purposes. It normally doesn't change. 3575 var forcegcperiod int64 = 2 * 60 * 1e9 3576 3577 // Always runs without a P, so write barriers are not allowed. 3578 // 3579 //go:nowritebarrierrec 3580 func sysmon() { 3581 // If a heap span goes unused for 5 minutes after a garbage collection, 3582 // we hand it back to the operating system. 3583 scavengelimit := int64(5 * 60 * 1e9) 3584 3585 if debug.scavenge > 0 { 3586 // Scavenge-a-lot for testing. 3587 forcegcperiod = 10 * 1e6 3588 scavengelimit = 20 * 1e6 3589 } 3590 3591 lastscavenge := nanotime() 3592 nscavenge := 0 3593 3594 lasttrace := int64(0) 3595 idle := 0 // how many cycles in succession we had not woken up somebody 3596 delay := uint32(0) 3597 for { 3598 if idle == 0 { // start with 20us sleep... 3599 delay = 20 3600 } else if idle > 50 { // start doubling the sleep after 1ms... 3601 delay *= 2 3602 } 3603 if delay > 10*1000 { // up to 10ms 3604 delay = 10 * 1000 3605 } 3606 usleep(delay) 3607 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic 3608 lock(&sched.lock) 3609 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 3610 atomic.Store(&sched.sysmonwait, 1) 3611 unlock(&sched.lock) 3612 // Make wake-up period small enough 3613 // for the sampling to be correct.
3614 maxsleep := forcegcperiod / 2 3615 if scavengelimit < forcegcperiod { 3616 maxsleep = scavengelimit / 2 3617 } 3618 notetsleep(&sched.sysmonnote, maxsleep) 3619 lock(&sched.lock) 3620 atomic.Store(&sched.sysmonwait, 0) 3621 noteclear(&sched.sysmonnote) 3622 idle = 0 3623 delay = 20 3624 } 3625 unlock(&sched.lock) 3626 } 3627 // poll network if not polled for more than 10ms 3628 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 3629 now := nanotime() 3630 unixnow := unixnanotime() 3631 if lastpoll != 0 && lastpoll+10*1000*1000 < now { 3632 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 3633 gp := netpoll(false) // non-blocking - returns list of goroutines 3634 if gp != nil { 3635 // Need to decrement number of idle locked M's 3636 // (pretending that one more is running) before injectglist. 3637 // Otherwise it can lead to the following situation: 3638 // injectglist grabs all P's but before it starts M's to run the P's, 3639 // another M returns from syscall, finishes running its G, 3640 // observes that there is no work to do and no other running M's 3641 // and reports deadlock. 3642 incidlelocked(-1) 3643 injectglist(gp) 3644 incidlelocked(1) 3645 } 3646 } 3647 // retake P's blocked in syscalls 3648 // and preempt long running G's 3649 if retake(now) != 0 { 3650 idle = 0 3651 } else { 3652 idle++ 3653 } 3654 // check if we need to force a GC 3655 lastgc := int64(atomic.Load64(&memstats.last_gc)) 3656 if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 { 3657 lock(&forcegc.lock) 3658 forcegc.idle = 0 3659 forcegc.g.schedlink = 0 3660 injectglist(forcegc.g) 3661 unlock(&forcegc.lock) 3662 } 3663 // scavenge heap once in a while 3664 if lastscavenge+scavengelimit/2 < now { 3665 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3666 lastscavenge = now 3667 nscavenge++ 3668 } 3669 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 3670 lasttrace = now 3671 schedtrace(debug.scheddetail > 0) 3672 } 3673 } 3674 } 3675 3676 var pdesc [_MaxGomaxprocs]struct { 3677 schedtick uint32 3678 schedwhen int64 3679 syscalltick uint32 3680 syscallwhen int64 3681 } 3682 3683 // forcePreemptNS is the time slice given to a G before it is 3684 // preempted. 3685 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3686 3687 func retake(now int64) uint32 { 3688 n := 0 3689 for i := int32(0); i < gomaxprocs; i++ { 3690 _p_ := allp[i] 3691 if _p_ == nil { 3692 continue 3693 } 3694 pd := &pdesc[i] 3695 s := _p_.status 3696 if s == _Psyscall { 3697 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3698 t := int64(_p_.syscalltick) 3699 if int64(pd.syscalltick) != t { 3700 pd.syscalltick = uint32(t) 3701 pd.syscallwhen = now 3702 continue 3703 } 3704 // On the one hand we don't want to retake Ps if there is no other work to do, 3705 // but on the other hand we want to retake them eventually 3706 // because they can prevent the sysmon thread from deep sleep. 3707 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3708 continue 3709 } 3710 // Need to decrement number of idle locked M's 3711 // (pretending that one more is running) before the CAS. 3712 // Otherwise the M from which we retake can exit the syscall, 3713 // increment nmidle and report deadlock. 
3714 			incidlelocked(-1)
3715 			if atomic.Cas(&_p_.status, s, _Pidle) {
3716 				if trace.enabled {
3717 					traceGoSysBlock(_p_)
3718 					traceProcStop(_p_)
3719 				}
3720 				n++
3721 				_p_.syscalltick++
3722 				handoffp(_p_)
3723 			}
3724 			incidlelocked(1)
3725 		} else if s == _Prunning {
3726 			// Preempt G if it's running for too long.
3727 			t := int64(_p_.schedtick)
3728 			if int64(pd.schedtick) != t {
3729 				pd.schedtick = uint32(t)
3730 				pd.schedwhen = now
3731 				continue
3732 			}
3733 			if pd.schedwhen+forcePreemptNS > now {
3734 				continue
3735 			}
3736 			preemptone(_p_)
3737 		}
3738 	}
3739 	return uint32(n)
3740 }
3741 
3742 // Tell all goroutines that they have been preempted and they should stop.
3743 // This function is purely best-effort. It can fail to inform a goroutine if a
3744 // processor just started running it.
3745 // No locks need to be held.
3746 // Returns true if preemption request was issued to at least one goroutine.
3747 func preemptall() bool {
3748 	res := false
3749 	for i := int32(0); i < gomaxprocs; i++ {
3750 		_p_ := allp[i]
3751 		if _p_ == nil || _p_.status != _Prunning {
3752 			continue
3753 		}
3754 		if preemptone(_p_) {
3755 			res = true
3756 		}
3757 	}
3758 	return res
3759 }
3760 
3761 // Tell the goroutine running on processor P to stop.
3762 // This function is purely best-effort. It can incorrectly fail to inform the
3763 // goroutine. It can inform the wrong goroutine. Even if it informs the
3764 // correct goroutine, that goroutine might ignore the request if it is
3765 // simultaneously executing newstack.
3766 // No lock needs to be held.
3767 // Returns true if preemption request was issued.
3768 // The actual preemption will happen at some point in the future
3769 // and will be indicated by the gp->status no longer being
3770 // Grunning.
3771 func preemptone(_p_ *p) bool {
3772 	mp := _p_.m.ptr()
3773 	if mp == nil || mp == getg().m {
3774 		return false
3775 	}
3776 	gp := mp.curg
3777 	if gp == nil || gp == mp.g0 {
3778 		return false
3779 	}
3780 
3781 	gp.preempt = true
3782 
3783 	// Every call in a goroutine checks for stack overflow by
3784 	// comparing the current stack pointer to gp->stackguard0.
3785 	// Setting gp->stackguard0 to StackPreempt folds
3786 	// preemption into the normal stack overflow check.
3787 	gp.stackguard0 = stackPreempt
3788 	return true
3789 }
3790 
3791 var starttime int64
3792 
3793 func schedtrace(detailed bool) {
3794 	now := nanotime()
3795 	if starttime == 0 {
3796 		starttime = now
3797 	}
3798 
3799 	lock(&sched.lock)
3800 	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
3801 	if detailed {
3802 		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
3803 	}
3804 	// We must be careful while reading data from P's, M's and G's.
3805 	// Even if we hold schedlock, most data can be changed concurrently.
3806 	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
3807 for i := int32(0); i < gomaxprocs; i++ { 3808 _p_ := allp[i] 3809 if _p_ == nil { 3810 continue 3811 } 3812 mp := _p_.m.ptr() 3813 h := atomic.Load(&_p_.runqhead) 3814 t := atomic.Load(&_p_.runqtail) 3815 if detailed { 3816 id := int32(-1) 3817 if mp != nil { 3818 id = mp.id 3819 } 3820 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 3821 } else { 3822 // In non-detailed mode format lengths of per-P run queues as: 3823 // [len1 len2 len3 len4] 3824 print(" ") 3825 if i == 0 { 3826 print("[") 3827 } 3828 print(t - h) 3829 if i == gomaxprocs-1 { 3830 print("]\n") 3831 } 3832 } 3833 } 3834 3835 if !detailed { 3836 unlock(&sched.lock) 3837 return 3838 } 3839 3840 for mp := allm; mp != nil; mp = mp.alllink { 3841 _p_ := mp.p.ptr() 3842 gp := mp.curg 3843 lockedg := mp.lockedg 3844 id1 := int32(-1) 3845 if _p_ != nil { 3846 id1 = _p_.id 3847 } 3848 id2 := int64(-1) 3849 if gp != nil { 3850 id2 = gp.goid 3851 } 3852 id3 := int64(-1) 3853 if lockedg != nil { 3854 id3 = lockedg.goid 3855 } 3856 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 3857 } 3858 3859 lock(&allglock) 3860 for gi := 0; gi < len(allgs); gi++ { 3861 gp := allgs[gi] 3862 mp := gp.m 3863 lockedm := gp.lockedm 3864 id1 := int32(-1) 3865 if mp != nil { 3866 id1 = mp.id 3867 } 3868 id2 := int32(-1) 3869 if lockedm != nil { 3870 id2 = lockedm.id 3871 } 3872 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 3873 } 3874 unlock(&allglock) 3875 unlock(&sched.lock) 3876 } 3877 3878 // Put mp on midle list. 3879 // Sched must be locked. 3880 // May run during STW, so write barriers are not allowed. 3881 //go:nowritebarrier 3882 func mput(mp *m) { 3883 mp.schedlink = sched.midle 3884 sched.midle.set(mp) 3885 sched.nmidle++ 3886 checkdead() 3887 } 3888 3889 // Try to get an m from midle list. 3890 // Sched must be locked. 3891 // May run during STW, so write barriers are not allowed. 3892 //go:nowritebarrier 3893 func mget() *m { 3894 mp := sched.midle.ptr() 3895 if mp != nil { 3896 sched.midle = mp.schedlink 3897 sched.nmidle-- 3898 } 3899 return mp 3900 } 3901 3902 // Put gp on the global runnable queue. 3903 // Sched must be locked. 3904 // May run during STW, so write barriers are not allowed. 3905 //go:nowritebarrier 3906 func globrunqput(gp *g) { 3907 gp.schedlink = 0 3908 if sched.runqtail != 0 { 3909 sched.runqtail.ptr().schedlink.set(gp) 3910 } else { 3911 sched.runqhead.set(gp) 3912 } 3913 sched.runqtail.set(gp) 3914 sched.runqsize++ 3915 } 3916 3917 // Put gp at the head of the global runnable queue. 3918 // Sched must be locked. 3919 // May run during STW, so write barriers are not allowed. 3920 //go:nowritebarrier 3921 func globrunqputhead(gp *g) { 3922 gp.schedlink = sched.runqhead 3923 sched.runqhead.set(gp) 3924 if sched.runqtail == 0 { 3925 sched.runqtail.set(gp) 3926 } 3927 sched.runqsize++ 3928 } 3929 3930 // Put a batch of runnable goroutines on the global runnable queue. 3931 // Sched must be locked. 
3932 func globrunqputbatch(ghead *g, gtail *g, n int32) {
3933 	gtail.schedlink = 0
3934 	if sched.runqtail != 0 {
3935 		sched.runqtail.ptr().schedlink.set(ghead)
3936 	} else {
3937 		sched.runqhead.set(ghead)
3938 	}
3939 	sched.runqtail.set(gtail)
3940 	sched.runqsize += n
3941 }
3942 
3943 // Try to get a batch of G's from the global runnable queue.
3944 // Sched must be locked.
3945 func globrunqget(_p_ *p, max int32) *g {
3946 	if sched.runqsize == 0 {
3947 		return nil
3948 	}
3949 
3950 	n := sched.runqsize/gomaxprocs + 1
3951 	if n > sched.runqsize {
3952 		n = sched.runqsize
3953 	}
3954 	if max > 0 && n > max {
3955 		n = max
3956 	}
3957 	if n > int32(len(_p_.runq))/2 {
3958 		n = int32(len(_p_.runq)) / 2
3959 	}
3960 
3961 	sched.runqsize -= n
3962 	if sched.runqsize == 0 {
3963 		sched.runqtail = 0
3964 	}
3965 
3966 	gp := sched.runqhead.ptr()
3967 	sched.runqhead = gp.schedlink
3968 	n--
3969 	for ; n > 0; n-- {
3970 		gp1 := sched.runqhead.ptr()
3971 		sched.runqhead = gp1.schedlink
3972 		runqput(_p_, gp1, false)
3973 	}
3974 	return gp
3975 }
3976 
3977 // Put p on the _Pidle list.
3978 // Sched must be locked.
3979 // May run during STW, so write barriers are not allowed.
3980 //go:nowritebarrier
3981 func pidleput(_p_ *p) {
3982 	if !runqempty(_p_) {
3983 		throw("pidleput: P has non-empty run queue")
3984 	}
3985 	_p_.link = sched.pidle
3986 	sched.pidle.set(_p_)
3987 	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
3988 }
3989 
3990 // Try to get a p from the _Pidle list.
3991 // Sched must be locked.
3992 // May run during STW, so write barriers are not allowed.
3993 //go:nowritebarrier
3994 func pidleget() *p {
3995 	_p_ := sched.pidle.ptr()
3996 	if _p_ != nil {
3997 		sched.pidle = _p_.link
3998 		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
3999 	}
4000 	return _p_
4001 }
4002 
4003 // runqempty returns true if _p_ has no Gs on its local run queue.
4004 // It never returns true spuriously.
4005 func runqempty(_p_ *p) bool {
4006 	// Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
4007 	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
4008 	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
4009 	// does not mean the queue is empty.
4010 	for {
4011 		head := atomic.Load(&_p_.runqhead)
4012 		tail := atomic.Load(&_p_.runqtail)
4013 		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4014 		if tail == atomic.Load(&_p_.runqtail) {
4015 			return head == tail && runnext == 0
4016 		}
4017 	}
4018 }
4019 
4020 // To shake out latent assumptions about scheduling order,
4021 // we introduce some randomness into scheduling decisions
4022 // when running with the race detector.
4023 // The need for this was made obvious by changing the
4024 // (deterministic) scheduling order in Go 1.5 and breaking
4025 // many poorly-written tests.
4026 // With the randomness here, as long as the tests pass
4027 // consistently with -race, they shouldn't have latent scheduling
4028 // assumptions.
4029 const randomizeScheduler = raceenabled
4030 
4031 // runqput tries to put g on the local runnable queue.
4032 // If next is false, runqput adds g to the tail of the runnable queue.
4033 // If next is true, runqput puts g in the _p_.runnext slot.
4034 // If the run queue is full, runqput puts g on the global queue.
4035 // Executed only by the owner P.
4036 func runqput(_p_ *p, gp *g, next bool) { 4037 if randomizeScheduler && next && fastrand1()%2 == 0 { 4038 next = false 4039 } 4040 4041 if next { 4042 retryNext: 4043 oldnext := _p_.runnext 4044 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 4045 goto retryNext 4046 } 4047 if oldnext == 0 { 4048 return 4049 } 4050 // Kick the old runnext out to the regular run queue. 4051 gp = oldnext.ptr() 4052 } 4053 4054 retry: 4055 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4056 t := _p_.runqtail 4057 if t-h < uint32(len(_p_.runq)) { 4058 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4059 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4060 return 4061 } 4062 if runqputslow(_p_, gp, h, t) { 4063 return 4064 } 4065 // the queue is not full, now the put above must succeed 4066 goto retry 4067 } 4068 4069 // Put g and a batch of work from local runnable queue on global queue. 4070 // Executed only by the owner P. 4071 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4072 var batch [len(_p_.runq)/2 + 1]*g 4073 4074 // First, grab a batch from local queue. 4075 n := t - h 4076 n = n / 2 4077 if n != uint32(len(_p_.runq)/2) { 4078 throw("runqputslow: queue is not full") 4079 } 4080 for i := uint32(0); i < n; i++ { 4081 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 4082 } 4083 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4084 return false 4085 } 4086 batch[n] = gp 4087 4088 if randomizeScheduler { 4089 for i := uint32(1); i <= n; i++ { 4090 j := fastrand1() % (i + 1) 4091 batch[i], batch[j] = batch[j], batch[i] 4092 } 4093 } 4094 4095 // Link the goroutines. 4096 for i := uint32(0); i < n; i++ { 4097 batch[i].schedlink.set(batch[i+1]) 4098 } 4099 4100 // Now put the batch on global queue. 4101 lock(&sched.lock) 4102 globrunqputbatch(batch[0], batch[n], int32(n+1)) 4103 unlock(&sched.lock) 4104 return true 4105 } 4106 4107 // Get g from local runnable queue. 4108 // If inheritTime is true, gp should inherit the remaining time in the 4109 // current time slice. Otherwise, it should start a new time slice. 4110 // Executed only by the owner P. 4111 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4112 // If there's a runnext, it's the next G to run. 4113 for { 4114 next := _p_.runnext 4115 if next == 0 { 4116 break 4117 } 4118 if _p_.runnext.cas(next, 0) { 4119 return next.ptr(), true 4120 } 4121 } 4122 4123 for { 4124 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4125 t := _p_.runqtail 4126 if t == h { 4127 return nil, false 4128 } 4129 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4130 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4131 return gp, false 4132 } 4133 } 4134 } 4135 4136 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4137 // Batch is a ring buffer starting at batchHead. 4138 // Returns number of grabbed goroutines. 4139 // Can be executed by any P. 4140 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4141 for { 4142 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4143 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4144 n := t - h 4145 n = n - n/2 4146 if n == 0 { 4147 if stealRunNextG { 4148 // Try to steal from _p_.runnext. 4149 if next := _p_.runnext; next != 0 { 4150 // Sleep to ensure that _p_ isn't about to run the g we 4151 // are about to steal. 
4152 // The important use case here is when the g running on _p_ 4153 // ready()s another g and then almost immediately blocks. 4154 // Instead of stealing runnext in this window, back off 4155 // to give _p_ a chance to schedule runnext. This will avoid 4156 // thrashing gs between different Ps. 4157 // A sync chan send/recv takes ~50ns as of time of writing, 4158 // so 3us gives ~50x overshoot. 4159 if GOOS != "windows" { 4160 usleep(3) 4161 } else { 4162 // On windows system timer granularity is 1-15ms, 4163 // which is way too much for this optimization. 4164 // So just yield. 4165 osyield() 4166 } 4167 if !_p_.runnext.cas(next, 0) { 4168 continue 4169 } 4170 batch[batchHead%uint32(len(batch))] = next 4171 return 1 4172 } 4173 } 4174 return 0 4175 } 4176 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4177 continue 4178 } 4179 for i := uint32(0); i < n; i++ { 4180 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4181 batch[(batchHead+i)%uint32(len(batch))] = g 4182 } 4183 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4184 return n 4185 } 4186 } 4187 } 4188 4189 // Steal half of elements from local runnable queue of p2 4190 // and put onto local runnable queue of p. 4191 // Returns one of the stolen elements (or nil if failed). 4192 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4193 t := _p_.runqtail 4194 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4195 if n == 0 { 4196 return nil 4197 } 4198 n-- 4199 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4200 if n == 0 { 4201 return gp 4202 } 4203 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4204 if t-h+n >= uint32(len(_p_.runq)) { 4205 throw("runqsteal: runq overflow") 4206 } 4207 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4208 return gp 4209 } 4210 4211 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4212 func setMaxThreads(in int) (out int) { 4213 lock(&sched.lock) 4214 out = int(sched.maxmcount) 4215 sched.maxmcount = int32(in) 4216 checkmcount() 4217 unlock(&sched.lock) 4218 return 4219 } 4220 4221 func haveexperiment(name string) bool { 4222 if name == "framepointer" { 4223 return framepointer_enabled // set by linker 4224 } 4225 x := sys.Goexperiment 4226 for x != "" { 4227 xname := "" 4228 i := index(x, ",") 4229 if i < 0 { 4230 xname, x = x, "" 4231 } else { 4232 xname, x = x[:i], x[i+1:] 4233 } 4234 if xname == name { 4235 return true 4236 } 4237 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 4238 return false 4239 } 4240 } 4241 return false 4242 } 4243 4244 //go:nosplit 4245 func procPin() int { 4246 _g_ := getg() 4247 mp := _g_.m 4248 4249 mp.locks++ 4250 return int(mp.p.ptr().id) 4251 } 4252 4253 //go:nosplit 4254 func procUnpin() { 4255 _g_ := getg() 4256 _g_.m.locks-- 4257 } 4258 4259 //go:linkname sync_runtime_procPin sync.runtime_procPin 4260 //go:nosplit 4261 func sync_runtime_procPin() int { 4262 return procPin() 4263 } 4264 4265 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 4266 //go:nosplit 4267 func sync_runtime_procUnpin() { 4268 procUnpin() 4269 } 4270 4271 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 4272 //go:nosplit 4273 func sync_atomic_runtime_procPin() int { 4274 return procPin() 4275 } 4276 4277 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 4278 //go:nosplit 4279 func sync_atomic_runtime_procUnpin() { 4280 procUnpin() 4281 } 4282 4283 // Active spinning for sync.Mutex. 
4284 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4285 //go:nosplit
4286 func sync_runtime_canSpin(i int) bool {
4287 	// sync.Mutex is cooperative, so we are conservative with spinning.
4288 	// Spin only a few times and only if running on a multicore machine and
4289 	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
4290 	// As opposed to runtime mutex, we don't do passive spinning here,
4291 	// because there can be work on the global runq or on other Ps.
4292 	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4293 		return false
4294 	}
4295 	if p := getg().m.p.ptr(); !runqempty(p) {
4296 		return false
4297 	}
4298 	return true
4299 }
4300 
4301 //go:linkname sync_runtime_doSpin sync.runtime_doSpin
4302 //go:nosplit
4303 func sync_runtime_doSpin() {
4304 	procyield(active_spin_cnt)
4305 }
4306 
4307 var stealOrder randomOrder
4308 
4309 // randomOrder/randomEnum are helper types for randomized work stealing.
4310 // They allow enumerating all Ps in different pseudo-random orders without repetitions.
4311 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4312 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
4313 type randomOrder struct {
4314 	count    uint32
4315 	coprimes []uint32
4316 }
4317 
4318 type randomEnum struct {
4319 	i     uint32
4320 	count uint32
4321 	pos   uint32
4322 	inc   uint32
4323 }
4324 
4325 func (ord *randomOrder) reset(count uint32) {
4326 	ord.count = count
4327 	ord.coprimes = ord.coprimes[:0]
4328 	for i := uint32(1); i <= count; i++ {
4329 		if gcd(i, count) == 1 {
4330 			ord.coprimes = append(ord.coprimes, i)
4331 		}
4332 	}
4333 }
4334 
4335 func (ord *randomOrder) start(i uint32) randomEnum {
4336 	return randomEnum{
4337 		count: ord.count,
4338 		pos:   i % ord.count,
4339 		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
4340 	}
4341 }
4342 
4343 func (enum *randomEnum) done() bool {
4344 	return enum.i == enum.count
4345 }
4346 
4347 func (enum *randomEnum) next() {
4348 	enum.i++
4349 	enum.pos = (enum.pos + enum.inc) % enum.count
4350 }
4351 
4352 func (enum *randomEnum) position() uint32 {
4353 	return enum.pos
4354 }
4355 
4356 func gcd(a, b uint32) uint32 {
4357 	for b != 0 {
4358 		a, b = b, a%b
4359 	}
4360 	return a
4361 }
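
The sleep backoff at the top of the sysmon loop above starts at 20us, stays there as long as retake keeps finding work, doubles once sysmon has been idle for more than 50 consecutive cycles, and caps at 10ms. The following is a standalone sketch, not part of proc.go, that simply replays that arithmetic so the resulting delay sequence can be seen directly.

package main

import "fmt"

func main() {
	delay := uint32(0)
	for idle := 0; idle <= 60; idle++ {
		// Same backoff rules as the sysmon loop above.
		if idle == 0 { // start with 20us sleep...
			delay = 20
		} else if idle > 50 { // start doubling the sleep after 1ms...
			delay *= 2
		}
		if delay > 10*1000 { // up to 10ms
			delay = 10 * 1000
		}
		if idle%10 == 0 || idle > 50 {
			fmt.Printf("idle=%d sleep=%dus\n", idle, delay)
		}
	}
}

Running it shows the delay holding at 20us through idle=50, then 40, 80, ..., until it saturates at 10000us around idle=59.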
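
The runqput/runqget/runqputslow functions above implement a fixed-size ring buffer indexed by ever-increasing head/tail counters, plus a one-element runnext slot that runqget prefers. Below is a minimal single-goroutine sketch, not part of proc.go: the pq type, the tiny queue size, plain ints standing in for goroutine pointers, and the absence of atomics and of the global-queue spill are all simplifications for illustration only.

package main

import "fmt"

const qsize = 4 // the real per-P runq holds 256 entries

type pq struct {
	head, tail uint32      // monotonically increasing; index the ring modulo qsize
	runq       [qsize]int  // ring buffer of runnable "goroutines"
	runnext    int         // 0 means empty; stands in for the guintptr slot
}

func (q *pq) put(g int, next bool) {
	if next {
		// The new g takes the runnext slot; the old occupant, if any,
		// is demoted to the tail of the ring, as runqput does.
		g, q.runnext = q.runnext, g
		if g == 0 {
			return
		}
	}
	if q.tail-q.head < qsize {
		q.runq[q.tail%qsize] = g
		q.tail++
		return
	}
	fmt.Println("queue full: the real runqput would spill half of it to the global queue")
}

func (q *pq) get() int {
	// runnext, when set, is the next g to run.
	if g := q.runnext; g != 0 {
		q.runnext = 0
		return g
	}
	if q.head == q.tail {
		return 0 // empty
	}
	g := q.runq[q.head%qsize]
	q.head++
	return g
}

func main() {
	var q pq
	q.put(1, false)
	q.put(2, false)
	q.put(3, true) // 3 becomes runnext
	q.put(4, true) // 4 displaces 3; 3 goes to the ring's tail
	for g := q.get(); g != 0; g = q.get() {
		fmt.Print(g, " ") // prints: 4 1 2 3
	}
	fmt.Println()
}

The point of the exercise is the ordering: the most recently readied "next" goroutine runs first, while a displaced runnext falls back to FIFO order behind the ring's existing entries.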
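
randomOrder.reset and start above rely on a number-theoretic fact: stepping through 0..count-1 by an increment that is coprime to count visits every index exactly once. Here is a standalone sketch, not part of proc.go, that demonstrates the property with the same gcd helper; the value of n, the coprime chosen, and the starting position are arbitrary illustrative choices.

package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const n = 8 // stands in for GOMAXPROCS
	var coprimes []uint32
	for i := uint32(1); i <= n; i++ {
		if gcd(i, n) == 1 {
			coprimes = append(coprimes, i) // for n=8: 1, 3, 5, 7
		}
	}
	// Pick one coprime increment and one starting position, as
	// randomOrder.start does with a pseudo-random i.
	inc, pos := coprimes[2], uint32(3)
	seen := make(map[uint32]bool)
	for i := uint32(0); i < n; i++ {
		fmt.Print(pos, " ") // prints: 3 0 5 2 7 4 1 6
		seen[pos] = true
		pos = (pos + inc) % n
	}
	fmt.Println("\nvisited every P exactly once:", len(seen) == n)
}

Because the increment and n share no common factor, the walk cannot cycle back to the start before n steps, which is exactly why stealOrder can enumerate all Ps without repetition in a different order per attempt.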