github.com/aloncn/graphics-go@v0.0.1/src/runtime/proc.go (about) 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "runtime/internal/atomic" 9 "runtime/internal/sys" 10 "unsafe" 11 ) 12 13 var buildVersion = sys.TheVersion 14 15 // Goroutine scheduler 16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads. 17 // 18 // The main concepts are: 19 // G - goroutine. 20 // M - worker thread, or machine. 21 // P - processor, a resource that is required to execute Go code. 22 // M must have an associated P to execute Go code, however it can be 23 // blocked or in a syscall w/o an associated P. 24 // 25 // Design doc at https://golang.org/s/go11sched. 26 27 // Worker thread parking/unparking. 28 // We need to balance between keeping enough running worker threads to utilize 29 // available hardware parallelism and parking excessive running worker threads 30 // to conserve CPU resources and power. This is not simple for two reasons: 31 // (1) scheduler state is intentionally distributed (in particular, per-P work 32 // queues), so it is not possible to compute global predicates on fast paths; 33 // (2) for optimal thread management we would need to know the future (don't park 34 // a worker thread when a new goroutine will be readied in near future). 35 // 36 // Three rejected approaches that would work badly: 37 // 1. Centralize all scheduler state (would inhibit scalability). 38 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there 39 // is a spare P, unpark a thread and handoff it the thread and the goroutine. 40 // This would lead to thread state thrashing, as the thread that readied the 41 // goroutine can be out of work the very next moment, we will need to park it. 42 // Also, it would destroy locality of computation as we want to preserve 43 // dependent goroutines on the same thread; and introduce additional latency. 44 // 3. Unpark an additional thread whenever we ready a goroutine and there is an 45 // idle P, but don't do handoff. This would lead to excessive thread parking/ 46 // unparking as the additional threads will instantly park without discovering 47 // any work to do. 48 // 49 // The current approach: 50 // We unpark an additional thread when we ready a goroutine if (1) there is an 51 // idle P and there are no "spinning" worker threads. A worker thread is considered 52 // spinning if it is out of local work and did not find work in global run queue/ 53 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning. 54 // Threads unparked this way are also considered spinning; we don't do goroutine 55 // handoff so such threads are out of work initially. Spinning threads do some 56 // spinning looking for work in per-P run queues before parking. If a spinning 57 // thread finds work it takes itself out of the spinning state and proceeds to 58 // execution. If it does not find work it takes itself out of the spinning state 59 // and then parks. 60 // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark 61 // new threads when readying goroutines. To compensate for that, if the last spinning 62 // thread finds work and stops spinning, it must unpark a new spinning thread. 63 // This approach smooths out unjustified spikes of thread unparking, 64 // but at the same time guarantees eventual maximal CPU parallelism utilization. 65 // 66 // The main implementation complication is that we need to be very careful during 67 // spinning->non-spinning thread transition. This transition can race with submission 68 // of a new goroutine, and either one part or another needs to unpark another worker 69 // thread. If they both fail to do that, we can end up with semi-persistent CPU 70 // underutilization. The general pattern for goroutine readying is: submit a goroutine 71 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. 72 // The general pattern for spinning->non-spinning transition is: decrement nmspinning, 73 // #StoreLoad-style memory barrier, check all per-P work queues for new work. 74 // Note that all this complexity does not apply to global run queue as we are not 75 // sloppy about thread unparking when submitting to global queue. Also see comments 76 // for nmspinning manipulation. 77 78 var ( 79 m0 m 80 g0 g 81 ) 82 83 //go:linkname runtime_init runtime.init 84 func runtime_init() 85 86 //go:linkname main_init main.init 87 func main_init() 88 89 // main_init_done is a signal used by cgocallbackg that initialization 90 // has been completed. It is made before _cgo_notify_runtime_init_done, 91 // so all cgo calls can rely on it existing. When main_init is complete, 92 // it is closed, meaning cgocallbackg can reliably receive from it. 93 var main_init_done chan bool 94 95 //go:linkname main_main main.main 96 func main_main() 97 98 // runtimeInitTime is the nanotime() at which the runtime started. 99 var runtimeInitTime int64 100 101 // Value to use for signal mask for newly created M's. 102 var initSigmask sigset 103 104 // The main goroutine. 105 func main() { 106 g := getg() 107 108 // Racectx of m0->g0 is used only as the parent of the main goroutine. 109 // It must not be used for anything else. 110 g.m.g0.racectx = 0 111 112 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 113 // Using decimal instead of binary GB and MB because 114 // they look nicer in the stack overflow failure message. 115 if sys.PtrSize == 8 { 116 maxstacksize = 1000000000 117 } else { 118 maxstacksize = 250000000 119 } 120 121 // Record when the world started. 122 runtimeInitTime = nanotime() 123 124 systemstack(func() { 125 newm(sysmon, nil) 126 }) 127 128 // Lock the main goroutine onto this, the main OS thread, 129 // during initialization. Most programs won't care, but a few 130 // do require certain calls to be made by the main thread. 131 // Those can arrange for main.main to run in the main thread 132 // by calling runtime.LockOSThread during initialization 133 // to preserve the lock. 134 lockOSThread() 135 136 if g.m != &m0 { 137 throw("runtime.main not on m0") 138 } 139 140 runtime_init() // must be before defer 141 142 // Defer unlock so that runtime.Goexit during init does the unlock too. 143 needUnlock := true 144 defer func() { 145 if needUnlock { 146 unlockOSThread() 147 } 148 }() 149 150 gcenable() 151 152 main_init_done = make(chan bool) 153 if iscgo { 154 if _cgo_thread_start == nil { 155 throw("_cgo_thread_start missing") 156 } 157 if _cgo_malloc == nil { 158 throw("_cgo_malloc missing") 159 } 160 if _cgo_free == nil { 161 throw("_cgo_free missing") 162 } 163 if GOOS != "windows" { 164 if _cgo_setenv == nil { 165 throw("_cgo_setenv missing") 166 } 167 if _cgo_unsetenv == nil { 168 throw("_cgo_unsetenv missing") 169 } 170 } 171 if _cgo_notify_runtime_init_done == nil { 172 throw("_cgo_notify_runtime_init_done missing") 173 } 174 cgocall(_cgo_notify_runtime_init_done, nil) 175 } 176 177 main_init() 178 close(main_init_done) 179 180 needUnlock = false 181 unlockOSThread() 182 183 if isarchive || islibrary { 184 // A program compiled with -buildmode=c-archive or c-shared 185 // has a main, but it is not executed. 186 return 187 } 188 main_main() 189 if raceenabled { 190 racefini() 191 } 192 193 // Make racy client program work: if panicking on 194 // another goroutine at the same time as main returns, 195 // let the other goroutine finish printing the panic trace. 196 // Once it does, it will exit. See issue 3934. 197 if panicking != 0 { 198 gopark(nil, nil, "panicwait", traceEvGoStop, 1) 199 } 200 201 exit(0) 202 for { 203 var x *int32 204 *x = 0 205 } 206 } 207 208 // os_beforeExit is called from os.Exit(0). 209 //go:linkname os_beforeExit os.runtime_beforeExit 210 func os_beforeExit() { 211 if raceenabled { 212 racefini() 213 } 214 } 215 216 // start forcegc helper goroutine 217 func init() { 218 go forcegchelper() 219 } 220 221 func forcegchelper() { 222 forcegc.g = getg() 223 for { 224 lock(&forcegc.lock) 225 if forcegc.idle != 0 { 226 throw("forcegc: phase error") 227 } 228 atomic.Store(&forcegc.idle, 1) 229 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1) 230 // this goroutine is explicitly resumed by sysmon 231 if debug.gctrace > 0 { 232 println("GC forced") 233 } 234 gcStart(gcBackgroundMode, true) 235 } 236 } 237 238 //go:nosplit 239 240 // Gosched yields the processor, allowing other goroutines to run. It does not 241 // suspend the current goroutine, so execution resumes automatically. 242 func Gosched() { 243 mcall(gosched_m) 244 } 245 246 // Puts the current goroutine into a waiting state and calls unlockf. 247 // If unlockf returns false, the goroutine is resumed. 248 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) { 249 mp := acquirem() 250 gp := mp.curg 251 status := readgstatus(gp) 252 if status != _Grunning && status != _Gscanrunning { 253 throw("gopark: bad g status") 254 } 255 mp.waitlock = lock 256 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf)) 257 gp.waitreason = reason 258 mp.waittraceev = traceEv 259 mp.waittraceskip = traceskip 260 releasem(mp) 261 // can't do anything that might move the G between Ms here. 262 mcall(park_m) 263 } 264 265 // Puts the current goroutine into a waiting state and unlocks the lock. 266 // The goroutine can be made runnable again by calling goready(gp). 267 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) { 268 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 269 } 270 271 func goready(gp *g, traceskip int) { 272 systemstack(func() { 273 ready(gp, traceskip) 274 }) 275 } 276 277 //go:nosplit 278 func acquireSudog() *sudog { 279 // Delicate dance: the semaphore implementation calls 280 // acquireSudog, acquireSudog calls new(sudog), 281 // new calls malloc, malloc can call the garbage collector, 282 // and the garbage collector calls the semaphore implementation 283 // in stopTheWorld. 284 // Break the cycle by doing acquirem/releasem around new(sudog). 285 // The acquirem/releasem increments m.locks during new(sudog), 286 // which keeps the garbage collector from being invoked. 287 mp := acquirem() 288 pp := mp.p.ptr() 289 if len(pp.sudogcache) == 0 { 290 lock(&sched.sudoglock) 291 // First, try to grab a batch from central cache. 292 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 293 s := sched.sudogcache 294 sched.sudogcache = s.next 295 s.next = nil 296 pp.sudogcache = append(pp.sudogcache, s) 297 } 298 unlock(&sched.sudoglock) 299 // If the central cache is empty, allocate a new one. 300 if len(pp.sudogcache) == 0 { 301 pp.sudogcache = append(pp.sudogcache, new(sudog)) 302 } 303 } 304 n := len(pp.sudogcache) 305 s := pp.sudogcache[n-1] 306 pp.sudogcache[n-1] = nil 307 pp.sudogcache = pp.sudogcache[:n-1] 308 if s.elem != nil { 309 throw("acquireSudog: found s.elem != nil in cache") 310 } 311 releasem(mp) 312 return s 313 } 314 315 //go:nosplit 316 func releaseSudog(s *sudog) { 317 if s.elem != nil { 318 throw("runtime: sudog with non-nil elem") 319 } 320 if s.selectdone != nil { 321 throw("runtime: sudog with non-nil selectdone") 322 } 323 if s.next != nil { 324 throw("runtime: sudog with non-nil next") 325 } 326 if s.prev != nil { 327 throw("runtime: sudog with non-nil prev") 328 } 329 if s.waitlink != nil { 330 throw("runtime: sudog with non-nil waitlink") 331 } 332 gp := getg() 333 if gp.param != nil { 334 throw("runtime: releaseSudog with non-nil gp.param") 335 } 336 mp := acquirem() // avoid rescheduling to another P 337 pp := mp.p.ptr() 338 if len(pp.sudogcache) == cap(pp.sudogcache) { 339 // Transfer half of local cache to the central cache. 340 var first, last *sudog 341 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 342 n := len(pp.sudogcache) 343 p := pp.sudogcache[n-1] 344 pp.sudogcache[n-1] = nil 345 pp.sudogcache = pp.sudogcache[:n-1] 346 if first == nil { 347 first = p 348 } else { 349 last.next = p 350 } 351 last = p 352 } 353 lock(&sched.sudoglock) 354 last.next = sched.sudogcache 355 sched.sudogcache = first 356 unlock(&sched.sudoglock) 357 } 358 pp.sudogcache = append(pp.sudogcache, s) 359 releasem(mp) 360 } 361 362 // funcPC returns the entry PC of the function f. 363 // It assumes that f is a func value. Otherwise the behavior is undefined. 364 //go:nosplit 365 func funcPC(f interface{}) uintptr { 366 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) 367 } 368 369 // called from assembly 370 func badmcall(fn func(*g)) { 371 throw("runtime: mcall called on m->g0 stack") 372 } 373 374 func badmcall2(fn func(*g)) { 375 throw("runtime: mcall function returned") 376 } 377 378 func badreflectcall() { 379 panic("runtime: arg size to reflect.call more than 1GB") 380 } 381 382 func lockedOSThread() bool { 383 gp := getg() 384 return gp.lockedm != nil && gp.m.lockedg != nil 385 } 386 387 var ( 388 allgs []*g 389 allglock mutex 390 ) 391 392 func allgadd(gp *g) { 393 if readgstatus(gp) == _Gidle { 394 throw("allgadd: bad status Gidle") 395 } 396 397 lock(&allglock) 398 allgs = append(allgs, gp) 399 allglen = uintptr(len(allgs)) 400 unlock(&allglock) 401 } 402 403 const ( 404 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 405 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 406 _GoidCacheBatch = 16 407 ) 408 409 // The bootstrap sequence is: 410 // 411 // call osinit 412 // call schedinit 413 // make & queue new G 414 // call runtime·mstart 415 // 416 // The new G calls runtime·main. 417 func schedinit() { 418 // raceinit must be the first call to race detector. 419 // In particular, it must be done before mallocinit below calls racemapshadow. 420 _g_ := getg() 421 if raceenabled { 422 _g_.racectx = raceinit() 423 } 424 425 sched.maxmcount = 10000 426 427 // Cache the framepointer experiment. This affects stack unwinding. 428 framepointer_enabled = haveexperiment("framepointer") 429 430 tracebackinit() 431 moduledataverify() 432 stackinit() 433 mallocinit() 434 mcommoninit(_g_.m) 435 436 msigsave(_g_.m) 437 initSigmask = _g_.m.sigmask 438 439 goargs() 440 goenvs() 441 parsedebugvars() 442 gcinit() 443 444 sched.lastpoll = uint64(nanotime()) 445 procs := int(ncpu) 446 if procs > _MaxGomaxprocs { 447 procs = _MaxGomaxprocs 448 } 449 if n := atoi(gogetenv("GOMAXPROCS")); n > 0 { 450 if n > _MaxGomaxprocs { 451 n = _MaxGomaxprocs 452 } 453 procs = n 454 } 455 if procresize(int32(procs)) != nil { 456 throw("unknown runnable goroutine during bootstrap") 457 } 458 459 if buildVersion == "" { 460 // Condition should never trigger. This code just serves 461 // to ensure runtime·buildVersion is kept in the resulting binary. 462 buildVersion = "unknown" 463 } 464 } 465 466 func dumpgstatus(gp *g) { 467 _g_ := getg() 468 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 469 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 470 } 471 472 func checkmcount() { 473 // sched lock is held 474 if sched.mcount > sched.maxmcount { 475 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 476 throw("thread exhaustion") 477 } 478 } 479 480 func mcommoninit(mp *m) { 481 _g_ := getg() 482 483 // g0 stack won't make sense for user (and is not necessary unwindable). 484 if _g_ != _g_.m.g0 { 485 callers(1, mp.createstack[:]) 486 } 487 488 mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks()) 489 if mp.fastrand == 0 { 490 mp.fastrand = 0x49f6428a 491 } 492 493 lock(&sched.lock) 494 mp.id = sched.mcount 495 sched.mcount++ 496 checkmcount() 497 mpreinit(mp) 498 if mp.gsignal != nil { 499 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 500 } 501 502 // Add to allm so garbage collector doesn't free g->m 503 // when it is just in a register or thread-local storage. 504 mp.alllink = allm 505 506 // NumCgoCall() iterates over allm w/o schedlock, 507 // so we need to publish it safely. 508 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 509 unlock(&sched.lock) 510 } 511 512 // Mark gp ready to run. 513 func ready(gp *g, traceskip int) { 514 if trace.enabled { 515 traceGoUnpark(gp, traceskip) 516 } 517 518 status := readgstatus(gp) 519 520 // Mark runnable. 521 _g_ := getg() 522 _g_.m.locks++ // disable preemption because it can be holding p in a local var 523 if status&^_Gscan != _Gwaiting { 524 dumpgstatus(gp) 525 throw("bad g->status in ready") 526 } 527 528 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 529 casgstatus(gp, _Gwaiting, _Grunnable) 530 runqput(_g_.m.p.ptr(), gp, true) 531 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic 532 wakep() 533 } 534 _g_.m.locks-- 535 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in Case we've cleared it in newstack 536 _g_.stackguard0 = stackPreempt 537 } 538 } 539 540 func gcprocs() int32 { 541 // Figure out how many CPUs to use during GC. 542 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 543 lock(&sched.lock) 544 n := gomaxprocs 545 if n > ncpu { 546 n = ncpu 547 } 548 if n > _MaxGcproc { 549 n = _MaxGcproc 550 } 551 if n > sched.nmidle+1 { // one M is currently running 552 n = sched.nmidle + 1 553 } 554 unlock(&sched.lock) 555 return n 556 } 557 558 func needaddgcproc() bool { 559 lock(&sched.lock) 560 n := gomaxprocs 561 if n > ncpu { 562 n = ncpu 563 } 564 if n > _MaxGcproc { 565 n = _MaxGcproc 566 } 567 n -= sched.nmidle + 1 // one M is currently running 568 unlock(&sched.lock) 569 return n > 0 570 } 571 572 func helpgc(nproc int32) { 573 _g_ := getg() 574 lock(&sched.lock) 575 pos := 0 576 for n := int32(1); n < nproc; n++ { // one M is currently running 577 if allp[pos].mcache == _g_.m.mcache { 578 pos++ 579 } 580 mp := mget() 581 if mp == nil { 582 throw("gcprocs inconsistency") 583 } 584 mp.helpgc = n 585 mp.p.set(allp[pos]) 586 mp.mcache = allp[pos].mcache 587 pos++ 588 notewakeup(&mp.park) 589 } 590 unlock(&sched.lock) 591 } 592 593 // freezeStopWait is a large value that freezetheworld sets 594 // sched.stopwait to in order to request that all Gs permanently stop. 595 const freezeStopWait = 0x7fffffff 596 597 // Similar to stopTheWorld but best-effort and can be called several times. 598 // There is no reverse operation, used during crashing. 599 // This function must not lock any mutexes. 600 func freezetheworld() { 601 // stopwait and preemption requests can be lost 602 // due to races with concurrently executing threads, 603 // so try several times 604 for i := 0; i < 5; i++ { 605 // this should tell the scheduler to not start any new goroutines 606 sched.stopwait = freezeStopWait 607 atomic.Store(&sched.gcwaiting, 1) 608 // this should stop running goroutines 609 if !preemptall() { 610 break // no running goroutines 611 } 612 usleep(1000) 613 } 614 // to be sure 615 usleep(1000) 616 preemptall() 617 usleep(1000) 618 } 619 620 func isscanstatus(status uint32) bool { 621 if status == _Gscan { 622 throw("isscanstatus: Bad status Gscan") 623 } 624 return status&_Gscan == _Gscan 625 } 626 627 // All reads and writes of g's status go through readgstatus, casgstatus 628 // castogscanstatus, casfrom_Gscanstatus. 629 //go:nosplit 630 func readgstatus(gp *g) uint32 { 631 return atomic.Load(&gp.atomicstatus) 632 } 633 634 // Ownership of gscanvalid: 635 // 636 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan), 637 // then gp owns gp.gscanvalid, and other goroutines must not modify it. 638 // 639 // Otherwise, a second goroutine can lock the scan state by setting _Gscan 640 // in the status bit and then modify gscanvalid, and then unlock the scan state. 641 // 642 // Note that the first condition implies an exception to the second: 643 // if a second goroutine changes gp's status to _Grunning|_Gscan, 644 // that second goroutine still does not have the right to modify gscanvalid. 645 646 // The Gscanstatuses are acting like locks and this releases them. 647 // If it proves to be a performance hit we should be able to make these 648 // simple atomic stores but for now we are going to throw if 649 // we see an inconsistent state. 650 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 651 success := false 652 653 // Check that transition is valid. 654 switch oldval { 655 default: 656 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 657 dumpgstatus(gp) 658 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 659 case _Gscanrunnable, 660 _Gscanwaiting, 661 _Gscanrunning, 662 _Gscansyscall: 663 if newval == oldval&^_Gscan { 664 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 665 } 666 case _Gscanenqueue: 667 if newval == _Gwaiting { 668 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 669 } 670 } 671 if !success { 672 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 673 dumpgstatus(gp) 674 throw("casfrom_Gscanstatus: gp->status is not in scan state") 675 } 676 if newval == _Grunning { 677 gp.gcscanvalid = false 678 } 679 } 680 681 // This will return false if the gp is not in the expected status and the cas fails. 682 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 683 func castogscanstatus(gp *g, oldval, newval uint32) bool { 684 switch oldval { 685 case _Grunnable, 686 _Gwaiting, 687 _Gsyscall: 688 if newval == oldval|_Gscan { 689 return atomic.Cas(&gp.atomicstatus, oldval, newval) 690 } 691 case _Grunning: 692 if newval == _Gscanrunning || newval == _Gscanenqueue { 693 return atomic.Cas(&gp.atomicstatus, oldval, newval) 694 } 695 } 696 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 697 throw("castogscanstatus") 698 panic("not reached") 699 } 700 701 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 702 // and casfrom_Gscanstatus instead. 703 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 704 // put it in the Gscan state is finished. 705 //go:nosplit 706 func casgstatus(gp *g, oldval, newval uint32) { 707 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 708 systemstack(func() { 709 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 710 throw("casgstatus: bad incoming values") 711 }) 712 } 713 714 if oldval == _Grunning && gp.gcscanvalid { 715 // If oldvall == _Grunning, then the actual status must be 716 // _Grunning or _Grunning|_Gscan; either way, 717 // we own gp.gcscanvalid, so it's safe to read. 718 // gp.gcscanvalid must not be true when we are running. 719 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n") 720 throw("casgstatus") 721 } 722 723 // loop if gp->atomicstatus is in a scan state giving 724 // GC time to finish and change the state to oldval. 725 for !atomic.Cas(&gp.atomicstatus, oldval, newval) { 726 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 727 systemstack(func() { 728 throw("casgstatus: waiting for Gwaiting but is Grunnable") 729 }) 730 } 731 // Help GC if needed. 732 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { 733 // gp.preemptscan = false 734 // systemstack(func() { 735 // gcphasework(gp) 736 // }) 737 // } 738 } 739 if newval == _Grunning { 740 gp.gcscanvalid = false 741 } 742 } 743 744 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 745 // Returns old status. Cannot call casgstatus directly, because we are racing with an 746 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 747 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 748 // it would loop waiting for the status to go back to Gwaiting, which it never will. 749 //go:nosplit 750 func casgcopystack(gp *g) uint32 { 751 for { 752 oldstatus := readgstatus(gp) &^ _Gscan 753 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 754 throw("copystack: bad status, not Gwaiting or Grunnable") 755 } 756 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 757 return oldstatus 758 } 759 } 760 } 761 762 // scang blocks until gp's stack has been scanned. 763 // It might be scanned by scang or it might be scanned by the goroutine itself. 764 // Either way, the stack scan has completed when scang returns. 765 func scang(gp *g) { 766 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone. 767 // Nothing is racing with us now, but gcscandone might be set to true left over 768 // from an earlier round of stack scanning (we scan twice per GC). 769 // We use gcscandone to record whether the scan has been done during this round. 770 // It is important that the scan happens exactly once: if called twice, 771 // the installation of stack barriers will detect the double scan and die. 772 773 gp.gcscandone = false 774 775 // Endeavor to get gcscandone set to true, 776 // either by doing the stack scan ourselves or by coercing gp to scan itself. 777 // gp.gcscandone can transition from false to true when we're not looking 778 // (if we asked for preemption), so any time we lock the status using 779 // castogscanstatus we have to double-check that the scan is still not done. 780 for !gp.gcscandone { 781 switch s := readgstatus(gp); s { 782 default: 783 dumpgstatus(gp) 784 throw("stopg: invalid status") 785 786 case _Gdead: 787 // No stack. 788 gp.gcscandone = true 789 790 case _Gcopystack: 791 // Stack being switched. Go around again. 792 793 case _Grunnable, _Gsyscall, _Gwaiting: 794 // Claim goroutine by setting scan bit. 795 // Racing with execution or readying of gp. 796 // The scan bit keeps them from running 797 // the goroutine until we're done. 798 if castogscanstatus(gp, s, s|_Gscan) { 799 if !gp.gcscandone { 800 scanstack(gp) 801 gp.gcscandone = true 802 } 803 restartg(gp) 804 } 805 806 case _Gscanwaiting: 807 // newstack is doing a scan for us right now. Wait. 808 809 case _Grunning: 810 // Goroutine running. Try to preempt execution so it can scan itself. 811 // The preemption handler (in newstack) does the actual scan. 812 813 // Optimization: if there is already a pending preemption request 814 // (from the previous loop iteration), don't bother with the atomics. 815 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt { 816 break 817 } 818 819 // Ask for preemption and self scan. 820 if castogscanstatus(gp, _Grunning, _Gscanrunning) { 821 if !gp.gcscandone { 822 gp.preemptscan = true 823 gp.preempt = true 824 gp.stackguard0 = stackPreempt 825 } 826 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning) 827 } 828 } 829 } 830 831 gp.preemptscan = false // cancel scan request if no longer needed 832 } 833 834 // The GC requests that this routine be moved from a scanmumble state to a mumble state. 835 func restartg(gp *g) { 836 s := readgstatus(gp) 837 switch s { 838 default: 839 dumpgstatus(gp) 840 throw("restartg: unexpected status") 841 842 case _Gdead: 843 // ok 844 845 case _Gscanrunnable, 846 _Gscanwaiting, 847 _Gscansyscall: 848 casfrom_Gscanstatus(gp, s, s&^_Gscan) 849 850 // Scan is now completed. 851 // Goroutine now needs to be made runnable. 852 // We put it on the global run queue; ready blocks on the global scheduler lock. 853 case _Gscanenqueue: 854 casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting) 855 if gp != getg().m.curg { 856 throw("processing Gscanenqueue on wrong m") 857 } 858 dropg() 859 ready(gp, 0) 860 } 861 } 862 863 // stopTheWorld stops all P's from executing goroutines, interrupting 864 // all goroutines at GC safe points and records reason as the reason 865 // for the stop. On return, only the current goroutine's P is running. 866 // stopTheWorld must not be called from a system stack and the caller 867 // must not hold worldsema. The caller must call startTheWorld when 868 // other P's should resume execution. 869 // 870 // stopTheWorld is safe for multiple goroutines to call at the 871 // same time. Each will execute its own stop, and the stops will 872 // be serialized. 873 // 874 // This is also used by routines that do stack dumps. If the system is 875 // in panic or being exited, this may not reliably stop all 876 // goroutines. 877 func stopTheWorld(reason string) { 878 semacquire(&worldsema, false) 879 getg().m.preemptoff = reason 880 systemstack(stopTheWorldWithSema) 881 } 882 883 // startTheWorld undoes the effects of stopTheWorld. 884 func startTheWorld() { 885 systemstack(startTheWorldWithSema) 886 // worldsema must be held over startTheWorldWithSema to ensure 887 // gomaxprocs cannot change while worldsema is held. 888 semrelease(&worldsema) 889 getg().m.preemptoff = "" 890 } 891 892 // Holding worldsema grants an M the right to try to stop the world 893 // and prevents gomaxprocs from changing concurrently. 894 var worldsema uint32 = 1 895 896 // stopTheWorldWithSema is the core implementation of stopTheWorld. 897 // The caller is responsible for acquiring worldsema and disabling 898 // preemption first and then should stopTheWorldWithSema on the system 899 // stack: 900 // 901 // semacquire(&worldsema, false) 902 // m.preemptoff = "reason" 903 // systemstack(stopTheWorldWithSema) 904 // 905 // When finished, the caller must either call startTheWorld or undo 906 // these three operations separately: 907 // 908 // m.preemptoff = "" 909 // systemstack(startTheWorldWithSema) 910 // semrelease(&worldsema) 911 // 912 // It is allowed to acquire worldsema once and then execute multiple 913 // startTheWorldWithSema/stopTheWorldWithSema pairs. 914 // Other P's are able to execute between successive calls to 915 // startTheWorldWithSema and stopTheWorldWithSema. 916 // Holding worldsema causes any other goroutines invoking 917 // stopTheWorld to block. 918 func stopTheWorldWithSema() { 919 _g_ := getg() 920 921 // If we hold a lock, then we won't be able to stop another M 922 // that is blocked trying to acquire the lock. 923 if _g_.m.locks > 0 { 924 throw("stopTheWorld: holding locks") 925 } 926 927 lock(&sched.lock) 928 sched.stopwait = gomaxprocs 929 atomic.Store(&sched.gcwaiting, 1) 930 preemptall() 931 // stop current P 932 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 933 sched.stopwait-- 934 // try to retake all P's in Psyscall status 935 for i := 0; i < int(gomaxprocs); i++ { 936 p := allp[i] 937 s := p.status 938 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 939 if trace.enabled { 940 traceGoSysBlock(p) 941 traceProcStop(p) 942 } 943 p.syscalltick++ 944 sched.stopwait-- 945 } 946 } 947 // stop idle P's 948 for { 949 p := pidleget() 950 if p == nil { 951 break 952 } 953 p.status = _Pgcstop 954 sched.stopwait-- 955 } 956 wait := sched.stopwait > 0 957 unlock(&sched.lock) 958 959 // wait for remaining P's to stop voluntarily 960 if wait { 961 for { 962 // wait for 100us, then try to re-preempt in case of any races 963 if notetsleep(&sched.stopnote, 100*1000) { 964 noteclear(&sched.stopnote) 965 break 966 } 967 preemptall() 968 } 969 } 970 if sched.stopwait != 0 { 971 throw("stopTheWorld: not stopped") 972 } 973 for i := 0; i < int(gomaxprocs); i++ { 974 p := allp[i] 975 if p.status != _Pgcstop { 976 throw("stopTheWorld: not stopped") 977 } 978 } 979 } 980 981 func mhelpgc() { 982 _g_ := getg() 983 _g_.m.helpgc = -1 984 } 985 986 func startTheWorldWithSema() { 987 _g_ := getg() 988 989 _g_.m.locks++ // disable preemption because it can be holding p in a local var 990 gp := netpoll(false) // non-blocking 991 injectglist(gp) 992 add := needaddgcproc() 993 lock(&sched.lock) 994 995 procs := gomaxprocs 996 if newprocs != 0 { 997 procs = newprocs 998 newprocs = 0 999 } 1000 p1 := procresize(procs) 1001 sched.gcwaiting = 0 1002 if sched.sysmonwait != 0 { 1003 sched.sysmonwait = 0 1004 notewakeup(&sched.sysmonnote) 1005 } 1006 unlock(&sched.lock) 1007 1008 for p1 != nil { 1009 p := p1 1010 p1 = p1.link.ptr() 1011 if p.m != 0 { 1012 mp := p.m.ptr() 1013 p.m = 0 1014 if mp.nextp != 0 { 1015 throw("startTheWorld: inconsistent mp->nextp") 1016 } 1017 mp.nextp.set(p) 1018 notewakeup(&mp.park) 1019 } else { 1020 // Start M to run P. Do not start another M below. 1021 newm(nil, p) 1022 add = false 1023 } 1024 } 1025 1026 // Wakeup an additional proc in case we have excessive runnable goroutines 1027 // in local queues or in the global queue. If we don't, the proc will park itself. 1028 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1029 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 1030 wakep() 1031 } 1032 1033 if add { 1034 // If GC could have used another helper proc, start one now, 1035 // in the hope that it will be available next time. 1036 // It would have been even better to start it before the collection, 1037 // but doing so requires allocating memory, so it's tricky to 1038 // coordinate. This lazy approach works out in practice: 1039 // we don't mind if the first couple gc rounds don't have quite 1040 // the maximum number of procs. 1041 newm(mhelpgc, nil) 1042 } 1043 _g_.m.locks-- 1044 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1045 _g_.stackguard0 = stackPreempt 1046 } 1047 } 1048 1049 // Called to start an M. 1050 //go:nosplit 1051 func mstart() { 1052 _g_ := getg() 1053 1054 if _g_.stack.lo == 0 { 1055 // Initialize stack bounds from system stack. 1056 // Cgo may have left stack size in stack.hi. 1057 size := _g_.stack.hi 1058 if size == 0 { 1059 size = 8192 * sys.StackGuardMultiplier 1060 } 1061 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1062 _g_.stack.lo = _g_.stack.hi - size + 1024 1063 } 1064 // Initialize stack guards so that we can start calling 1065 // both Go and C functions with stack growth prologues. 1066 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1067 _g_.stackguard1 = _g_.stackguard0 1068 mstart1() 1069 } 1070 1071 func mstart1() { 1072 _g_ := getg() 1073 1074 if _g_ != _g_.m.g0 { 1075 throw("bad runtime·mstart") 1076 } 1077 1078 // Record top of stack for use by mcall. 1079 // Once we call schedule we're never coming back, 1080 // so other calls can reuse this stack space. 1081 gosave(&_g_.m.g0.sched) 1082 _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used 1083 asminit() 1084 minit() 1085 1086 // Install signal handlers; after minit so that minit can 1087 // prepare the thread to be able to handle the signals. 1088 if _g_.m == &m0 { 1089 // Create an extra M for callbacks on threads not created by Go. 1090 if iscgo && !cgoHasExtraM { 1091 cgoHasExtraM = true 1092 newextram() 1093 } 1094 initsig(false) 1095 } 1096 1097 if fn := _g_.m.mstartfn; fn != nil { 1098 fn() 1099 } 1100 1101 if _g_.m.helpgc != 0 { 1102 _g_.m.helpgc = 0 1103 stopm() 1104 } else if _g_.m != &m0 { 1105 acquirep(_g_.m.nextp.ptr()) 1106 _g_.m.nextp = 0 1107 } 1108 schedule() 1109 } 1110 1111 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1112 // If a P is currently executing code, this will bring the P to a GC 1113 // safe point and execute fn on that P. If the P is not executing code 1114 // (it is idle or in a syscall), this will call fn(p) directly while 1115 // preventing the P from exiting its state. This does not ensure that 1116 // fn will run on every CPU executing Go code, but it acts as a global 1117 // memory barrier. GC uses this as a "ragged barrier." 1118 // 1119 // The caller must hold worldsema. 1120 // 1121 //go:systemstack 1122 func forEachP(fn func(*p)) { 1123 mp := acquirem() 1124 _p_ := getg().m.p.ptr() 1125 1126 lock(&sched.lock) 1127 if sched.safePointWait != 0 { 1128 throw("forEachP: sched.safePointWait != 0") 1129 } 1130 sched.safePointWait = gomaxprocs - 1 1131 sched.safePointFn = fn 1132 1133 // Ask all Ps to run the safe point function. 1134 for _, p := range allp[:gomaxprocs] { 1135 if p != _p_ { 1136 atomic.Store(&p.runSafePointFn, 1) 1137 } 1138 } 1139 preemptall() 1140 1141 // Any P entering _Pidle or _Psyscall from now on will observe 1142 // p.runSafePointFn == 1 and will call runSafePointFn when 1143 // changing its status to _Pidle/_Psyscall. 1144 1145 // Run safe point function for all idle Ps. sched.pidle will 1146 // not change because we hold sched.lock. 1147 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1148 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1149 fn(p) 1150 sched.safePointWait-- 1151 } 1152 } 1153 1154 wait := sched.safePointWait > 0 1155 unlock(&sched.lock) 1156 1157 // Run fn for the current P. 1158 fn(_p_) 1159 1160 // Force Ps currently in _Psyscall into _Pidle and hand them 1161 // off to induce safe point function execution. 1162 for i := 0; i < int(gomaxprocs); i++ { 1163 p := allp[i] 1164 s := p.status 1165 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1166 if trace.enabled { 1167 traceGoSysBlock(p) 1168 traceProcStop(p) 1169 } 1170 p.syscalltick++ 1171 handoffp(p) 1172 } 1173 } 1174 1175 // Wait for remaining Ps to run fn. 1176 if wait { 1177 for { 1178 // Wait for 100us, then try to re-preempt in 1179 // case of any races. 1180 // 1181 // Requires system stack. 1182 if notetsleep(&sched.safePointNote, 100*1000) { 1183 noteclear(&sched.safePointNote) 1184 break 1185 } 1186 preemptall() 1187 } 1188 } 1189 if sched.safePointWait != 0 { 1190 throw("forEachP: not done") 1191 } 1192 for i := 0; i < int(gomaxprocs); i++ { 1193 p := allp[i] 1194 if p.runSafePointFn != 0 { 1195 throw("forEachP: P did not run fn") 1196 } 1197 } 1198 1199 lock(&sched.lock) 1200 sched.safePointFn = nil 1201 unlock(&sched.lock) 1202 releasem(mp) 1203 } 1204 1205 // runSafePointFn runs the safe point function, if any, for this P. 1206 // This should be called like 1207 // 1208 // if getg().m.p.runSafePointFn != 0 { 1209 // runSafePointFn() 1210 // } 1211 // 1212 // runSafePointFn must be checked on any transition in to _Pidle or 1213 // _Psyscall to avoid a race where forEachP sees that the P is running 1214 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1215 // nor the P run the safe-point function. 1216 func runSafePointFn() { 1217 p := getg().m.p.ptr() 1218 // Resolve the race between forEachP running the safe-point 1219 // function on this P's behalf and this P running the 1220 // safe-point function directly. 1221 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1222 return 1223 } 1224 sched.safePointFn(p) 1225 lock(&sched.lock) 1226 sched.safePointWait-- 1227 if sched.safePointWait == 0 { 1228 notewakeup(&sched.safePointNote) 1229 } 1230 unlock(&sched.lock) 1231 } 1232 1233 // When running with cgo, we call _cgo_thread_start 1234 // to start threads for us so that we can play nicely with 1235 // foreign code. 1236 var cgoThreadStart unsafe.Pointer 1237 1238 type cgothreadstart struct { 1239 g guintptr 1240 tls *uint64 1241 fn unsafe.Pointer 1242 } 1243 1244 // Allocate a new m unassociated with any thread. 1245 // Can use p for allocation context if needed. 1246 // fn is recorded as the new m's m.mstartfn. 1247 // 1248 // This function it known to the compiler to inhibit the 1249 // go:nowritebarrierrec annotation because it uses P for allocation. 1250 func allocm(_p_ *p, fn func()) *m { 1251 _g_ := getg() 1252 _g_.m.locks++ // disable GC because it can be called from sysmon 1253 if _g_.m.p == 0 { 1254 acquirep(_p_) // temporarily borrow p for mallocs in this function 1255 } 1256 mp := new(m) 1257 mp.mstartfn = fn 1258 mcommoninit(mp) 1259 1260 // In case of cgo or Solaris, pthread_create will make us a stack. 1261 // Windows and Plan 9 will layout sched stack on OS stack. 1262 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" { 1263 mp.g0 = malg(-1) 1264 } else { 1265 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1266 } 1267 mp.g0.m = mp 1268 1269 if _p_ == _g_.m.p.ptr() { 1270 releasep() 1271 } 1272 _g_.m.locks-- 1273 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1274 _g_.stackguard0 = stackPreempt 1275 } 1276 1277 return mp 1278 } 1279 1280 // needm is called when a cgo callback happens on a 1281 // thread without an m (a thread not created by Go). 1282 // In this case, needm is expected to find an m to use 1283 // and return with m, g initialized correctly. 1284 // Since m and g are not set now (likely nil, but see below) 1285 // needm is limited in what routines it can call. In particular 1286 // it can only call nosplit functions (textflag 7) and cannot 1287 // do any scheduling that requires an m. 1288 // 1289 // In order to avoid needing heavy lifting here, we adopt 1290 // the following strategy: there is a stack of available m's 1291 // that can be stolen. Using compare-and-swap 1292 // to pop from the stack has ABA races, so we simulate 1293 // a lock by doing an exchange (via casp) to steal the stack 1294 // head and replace the top pointer with MLOCKED (1). 1295 // This serves as a simple spin lock that we can use even 1296 // without an m. The thread that locks the stack in this way 1297 // unlocks the stack by storing a valid stack head pointer. 1298 // 1299 // In order to make sure that there is always an m structure 1300 // available to be stolen, we maintain the invariant that there 1301 // is always one more than needed. At the beginning of the 1302 // program (if cgo is in use) the list is seeded with a single m. 1303 // If needm finds that it has taken the last m off the list, its job 1304 // is - once it has installed its own m so that it can do things like 1305 // allocate memory - to create a spare m and put it on the list. 1306 // 1307 // Each of these extra m's also has a g0 and a curg that are 1308 // pressed into service as the scheduling stack and current 1309 // goroutine for the duration of the cgo callback. 1310 // 1311 // When the callback is done with the m, it calls dropm to 1312 // put the m back on the list. 1313 //go:nosplit 1314 func needm(x byte) { 1315 if iscgo && !cgoHasExtraM { 1316 // Can happen if C/C++ code calls Go from a global ctor. 1317 // Can not throw, because scheduler is not initialized yet. 1318 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1319 exit(1) 1320 } 1321 1322 // Lock extra list, take head, unlock popped list. 1323 // nilokay=false is safe here because of the invariant above, 1324 // that the extra list always contains or will soon contain 1325 // at least one m. 1326 mp := lockextra(false) 1327 1328 // Set needextram when we've just emptied the list, 1329 // so that the eventual call into cgocallbackg will 1330 // allocate a new m for the extra list. We delay the 1331 // allocation until then so that it can be done 1332 // after exitsyscall makes sure it is okay to be 1333 // running at all (that is, there's no garbage collection 1334 // running right now). 1335 mp.needextram = mp.schedlink == 0 1336 unlockextra(mp.schedlink.ptr()) 1337 1338 // Save and block signals before installing g. 1339 // Once g is installed, any incoming signals will try to execute, 1340 // but we won't have the sigaltstack settings and other data 1341 // set up appropriately until the end of minit, which will 1342 // unblock the signals. This is the same dance as when 1343 // starting a new m to run Go code via newosproc. 1344 msigsave(mp) 1345 sigblock() 1346 1347 // Install g (= m->g0) and set the stack bounds 1348 // to match the current stack. We don't actually know 1349 // how big the stack is, like we don't know how big any 1350 // scheduling stack is, but we assume there's at least 32 kB, 1351 // which is more than enough for us. 1352 setg(mp.g0) 1353 _g_ := getg() 1354 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 1355 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 1356 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1357 1358 // Initialize this thread to use the m. 1359 asminit() 1360 minit() 1361 } 1362 1363 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1364 1365 // newextram allocates an m and puts it on the extra list. 1366 // It is called with a working local m, so that it can do things 1367 // like call schedlock and allocate. 1368 func newextram() { 1369 // Create extra goroutine locked to extra m. 1370 // The goroutine is the context in which the cgo callback will run. 1371 // The sched.pc will never be returned to, but setting it to 1372 // goexit makes clear to the traceback routines where 1373 // the goroutine stack ends. 1374 mp := allocm(nil, nil) 1375 gp := malg(4096) 1376 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1377 gp.sched.sp = gp.stack.hi 1378 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1379 gp.sched.lr = 0 1380 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1381 gp.syscallpc = gp.sched.pc 1382 gp.syscallsp = gp.sched.sp 1383 gp.stktopsp = gp.sched.sp 1384 // malg returns status as Gidle, change to Gsyscall before adding to allg 1385 // where GC will see it. 1386 casgstatus(gp, _Gidle, _Gsyscall) 1387 gp.m = mp 1388 mp.curg = gp 1389 mp.locked = _LockInternal 1390 mp.lockedg = gp 1391 gp.lockedm = mp 1392 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1393 if raceenabled { 1394 gp.racectx = racegostart(funcPC(newextram)) 1395 } 1396 // put on allg for garbage collector 1397 allgadd(gp) 1398 1399 // Add m to the extra list. 1400 mnext := lockextra(true) 1401 mp.schedlink.set(mnext) 1402 unlockextra(mp) 1403 } 1404 1405 // dropm is called when a cgo callback has called needm but is now 1406 // done with the callback and returning back into the non-Go thread. 1407 // It puts the current m back onto the extra list. 1408 // 1409 // The main expense here is the call to signalstack to release the 1410 // m's signal stack, and then the call to needm on the next callback 1411 // from this thread. It is tempting to try to save the m for next time, 1412 // which would eliminate both these costs, but there might not be 1413 // a next time: the current thread (which Go does not control) might exit. 1414 // If we saved the m for that thread, there would be an m leak each time 1415 // such a thread exited. Instead, we acquire and release an m on each 1416 // call. These should typically not be scheduling operations, just a few 1417 // atomics, so the cost should be small. 1418 // 1419 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1420 // variable using pthread_key_create. Unlike the pthread keys we already use 1421 // on OS X, this dummy key would never be read by Go code. It would exist 1422 // only so that we could register at thread-exit-time destructor. 1423 // That destructor would put the m back onto the extra list. 1424 // This is purely a performance optimization. The current version, 1425 // in which dropm happens on each cgo call, is still correct too. 1426 // We may have to keep the current version on systems with cgo 1427 // but without pthreads, like Windows. 1428 func dropm() { 1429 // Clear m and g, and return m to the extra list. 1430 // After the call to setg we can only call nosplit functions 1431 // with no pointer manipulation. 1432 mp := getg().m 1433 1434 // Block signals before unminit. 1435 // Unminit unregisters the signal handling stack (but needs g on some systems). 1436 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1437 // It's important not to try to handle a signal between those two steps. 1438 sigmask := mp.sigmask 1439 sigblock() 1440 unminit() 1441 1442 mnext := lockextra(true) 1443 mp.schedlink.set(mnext) 1444 1445 setg(nil) 1446 1447 // Commit the release of mp. 1448 unlockextra(mp) 1449 1450 msigrestore(sigmask) 1451 } 1452 1453 // A helper function for EnsureDropM. 1454 func getm() uintptr { 1455 return uintptr(unsafe.Pointer(getg().m)) 1456 } 1457 1458 var extram uintptr 1459 1460 // lockextra locks the extra list and returns the list head. 1461 // The caller must unlock the list by storing a new list head 1462 // to extram. If nilokay is true, then lockextra will 1463 // return a nil list head if that's what it finds. If nilokay is false, 1464 // lockextra will keep waiting until the list head is no longer nil. 1465 //go:nosplit 1466 func lockextra(nilokay bool) *m { 1467 const locked = 1 1468 1469 for { 1470 old := atomic.Loaduintptr(&extram) 1471 if old == locked { 1472 yield := osyield 1473 yield() 1474 continue 1475 } 1476 if old == 0 && !nilokay { 1477 usleep(1) 1478 continue 1479 } 1480 if atomic.Casuintptr(&extram, old, locked) { 1481 return (*m)(unsafe.Pointer(old)) 1482 } 1483 yield := osyield 1484 yield() 1485 continue 1486 } 1487 } 1488 1489 //go:nosplit 1490 func unlockextra(mp *m) { 1491 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1492 } 1493 1494 // Create a new m. It will start off with a call to fn, or else the scheduler. 1495 // fn needs to be static and not a heap allocated closure. 1496 // May run with m.p==nil, so write barriers are not allowed. 1497 //go:nowritebarrier 1498 func newm(fn func(), _p_ *p) { 1499 mp := allocm(_p_, fn) 1500 mp.nextp.set(_p_) 1501 mp.sigmask = initSigmask 1502 if iscgo { 1503 var ts cgothreadstart 1504 if _cgo_thread_start == nil { 1505 throw("_cgo_thread_start missing") 1506 } 1507 ts.g.set(mp.g0) 1508 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1509 ts.fn = unsafe.Pointer(funcPC(mstart)) 1510 if msanenabled { 1511 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1512 } 1513 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1514 return 1515 } 1516 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi)) 1517 } 1518 1519 // Stops execution of the current m until new work is available. 1520 // Returns with acquired P. 1521 func stopm() { 1522 _g_ := getg() 1523 1524 if _g_.m.locks != 0 { 1525 throw("stopm holding locks") 1526 } 1527 if _g_.m.p != 0 { 1528 throw("stopm holding p") 1529 } 1530 if _g_.m.spinning { 1531 throw("stopm spinning") 1532 } 1533 1534 retry: 1535 lock(&sched.lock) 1536 mput(_g_.m) 1537 unlock(&sched.lock) 1538 notesleep(&_g_.m.park) 1539 noteclear(&_g_.m.park) 1540 if _g_.m.helpgc != 0 { 1541 gchelper() 1542 _g_.m.helpgc = 0 1543 _g_.m.mcache = nil 1544 _g_.m.p = 0 1545 goto retry 1546 } 1547 acquirep(_g_.m.nextp.ptr()) 1548 _g_.m.nextp = 0 1549 } 1550 1551 func mspinning() { 1552 // startm's caller incremented nmspinning. Set the new M's spinning. 1553 getg().m.spinning = true 1554 } 1555 1556 // Schedules some M to run the p (creates an M if necessary). 1557 // If p==nil, tries to get an idle P, if no idle P's does nothing. 1558 // May run with m.p==nil, so write barriers are not allowed. 1559 // If spinning is set, the caller has incremented nmspinning and startm will 1560 // either decrement nmspinning or set m.spinning in the newly started M. 1561 //go:nowritebarrier 1562 func startm(_p_ *p, spinning bool) { 1563 lock(&sched.lock) 1564 if _p_ == nil { 1565 _p_ = pidleget() 1566 if _p_ == nil { 1567 unlock(&sched.lock) 1568 if spinning { 1569 // The caller incremented nmspinning, but there are no idle Ps, 1570 // so it's okay to just undo the increment and give up. 1571 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1572 throw("startm: negative nmspinning") 1573 } 1574 } 1575 return 1576 } 1577 } 1578 mp := mget() 1579 unlock(&sched.lock) 1580 if mp == nil { 1581 var fn func() 1582 if spinning { 1583 // The caller incremented nmspinning, so set m.spinning in the new M. 1584 fn = mspinning 1585 } 1586 newm(fn, _p_) 1587 return 1588 } 1589 if mp.spinning { 1590 throw("startm: m is spinning") 1591 } 1592 if mp.nextp != 0 { 1593 throw("startm: m has p") 1594 } 1595 if spinning && !runqempty(_p_) { 1596 throw("startm: p has runnable gs") 1597 } 1598 // The caller incremented nmspinning, so set m.spinning in the new M. 1599 mp.spinning = spinning 1600 mp.nextp.set(_p_) 1601 notewakeup(&mp.park) 1602 } 1603 1604 // Hands off P from syscall or locked M. 1605 // Always runs without a P, so write barriers are not allowed. 1606 //go:nowritebarrier 1607 func handoffp(_p_ *p) { 1608 // handoffp must start an M in any situation where 1609 // findrunnable would return a G to run on _p_. 1610 1611 // if it has local work, start it straight away 1612 if !runqempty(_p_) || sched.runqsize != 0 { 1613 startm(_p_, false) 1614 return 1615 } 1616 // if it has GC work, start it straight away 1617 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 1618 startm(_p_, false) 1619 return 1620 } 1621 // no local work, check that there are no spinning/idle M's, 1622 // otherwise our help is not required 1623 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 1624 startm(_p_, true) 1625 return 1626 } 1627 lock(&sched.lock) 1628 if sched.gcwaiting != 0 { 1629 _p_.status = _Pgcstop 1630 sched.stopwait-- 1631 if sched.stopwait == 0 { 1632 notewakeup(&sched.stopnote) 1633 } 1634 unlock(&sched.lock) 1635 return 1636 } 1637 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 1638 sched.safePointFn(_p_) 1639 sched.safePointWait-- 1640 if sched.safePointWait == 0 { 1641 notewakeup(&sched.safePointNote) 1642 } 1643 } 1644 if sched.runqsize != 0 { 1645 unlock(&sched.lock) 1646 startm(_p_, false) 1647 return 1648 } 1649 // If this is the last running P and nobody is polling network, 1650 // need to wakeup another M to poll network. 1651 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 1652 unlock(&sched.lock) 1653 startm(_p_, false) 1654 return 1655 } 1656 pidleput(_p_) 1657 unlock(&sched.lock) 1658 } 1659 1660 // Tries to add one more P to execute G's. 1661 // Called when a G is made runnable (newproc, ready). 1662 func wakep() { 1663 // be conservative about spinning threads 1664 if !atomic.Cas(&sched.nmspinning, 0, 1) { 1665 return 1666 } 1667 startm(nil, true) 1668 } 1669 1670 // Stops execution of the current m that is locked to a g until the g is runnable again. 1671 // Returns with acquired P. 1672 func stoplockedm() { 1673 _g_ := getg() 1674 1675 if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m { 1676 throw("stoplockedm: inconsistent locking") 1677 } 1678 if _g_.m.p != 0 { 1679 // Schedule another M to run this p. 1680 _p_ := releasep() 1681 handoffp(_p_) 1682 } 1683 incidlelocked(1) 1684 // Wait until another thread schedules lockedg again. 1685 notesleep(&_g_.m.park) 1686 noteclear(&_g_.m.park) 1687 status := readgstatus(_g_.m.lockedg) 1688 if status&^_Gscan != _Grunnable { 1689 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 1690 dumpgstatus(_g_) 1691 throw("stoplockedm: not runnable") 1692 } 1693 acquirep(_g_.m.nextp.ptr()) 1694 _g_.m.nextp = 0 1695 } 1696 1697 // Schedules the locked m to run the locked gp. 1698 // May run during STW, so write barriers are not allowed. 1699 //go:nowritebarrier 1700 func startlockedm(gp *g) { 1701 _g_ := getg() 1702 1703 mp := gp.lockedm 1704 if mp == _g_.m { 1705 throw("startlockedm: locked to me") 1706 } 1707 if mp.nextp != 0 { 1708 throw("startlockedm: m has p") 1709 } 1710 // directly handoff current P to the locked m 1711 incidlelocked(-1) 1712 _p_ := releasep() 1713 mp.nextp.set(_p_) 1714 notewakeup(&mp.park) 1715 stopm() 1716 } 1717 1718 // Stops the current m for stopTheWorld. 1719 // Returns when the world is restarted. 1720 func gcstopm() { 1721 _g_ := getg() 1722 1723 if sched.gcwaiting == 0 { 1724 throw("gcstopm: not waiting for gc") 1725 } 1726 if _g_.m.spinning { 1727 _g_.m.spinning = false 1728 // OK to just drop nmspinning here, 1729 // startTheWorld will unpark threads as necessary. 1730 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1731 throw("gcstopm: negative nmspinning") 1732 } 1733 } 1734 _p_ := releasep() 1735 lock(&sched.lock) 1736 _p_.status = _Pgcstop 1737 sched.stopwait-- 1738 if sched.stopwait == 0 { 1739 notewakeup(&sched.stopnote) 1740 } 1741 unlock(&sched.lock) 1742 stopm() 1743 } 1744 1745 // Schedules gp to run on the current M. 1746 // If inheritTime is true, gp inherits the remaining time in the 1747 // current time slice. Otherwise, it starts a new time slice. 1748 // Never returns. 1749 func execute(gp *g, inheritTime bool) { 1750 _g_ := getg() 1751 1752 casgstatus(gp, _Grunnable, _Grunning) 1753 gp.waitsince = 0 1754 gp.preempt = false 1755 gp.stackguard0 = gp.stack.lo + _StackGuard 1756 if !inheritTime { 1757 _g_.m.p.ptr().schedtick++ 1758 } 1759 _g_.m.curg = gp 1760 gp.m = _g_.m 1761 1762 // Check whether the profiler needs to be turned on or off. 1763 hz := sched.profilehz 1764 if _g_.m.profilehz != hz { 1765 resetcpuprofiler(hz) 1766 } 1767 1768 if trace.enabled { 1769 // GoSysExit has to happen when we have a P, but before GoStart. 1770 // So we emit it here. 1771 if gp.syscallsp != 0 && gp.sysblocktraced { 1772 // Since gp.sysblocktraced is true, we must emit an event. 1773 // There is a race between the code that initializes sysexitseq 1774 // and sysexitticks (in exitsyscall, which runs without a P, 1775 // and therefore is not stopped with the rest of the world) 1776 // and the code that initializes a new trace. 1777 // The recorded sysexitseq and sysexitticks must therefore 1778 // be treated as "best effort". If they are valid for this trace, 1779 // then great, use them for greater accuracy. 1780 // But if they're not valid for this trace, assume that the 1781 // trace was started after the actual syscall exit (but before 1782 // we actually managed to start the goroutine, aka right now), 1783 // and assign a fresh time stamp to keep the log consistent. 1784 seq, ts := gp.sysexitseq, gp.sysexitticks 1785 if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 { 1786 seq, ts = tracestamp() 1787 } 1788 traceGoSysExit(seq, ts) 1789 } 1790 traceGoStart() 1791 } 1792 1793 gogo(&gp.sched) 1794 } 1795 1796 // Finds a runnable goroutine to execute. 1797 // Tries to steal from other P's, get g from global queue, poll network. 1798 func findrunnable() (gp *g, inheritTime bool) { 1799 _g_ := getg() 1800 1801 // The conditions here and in handoffp must agree: if 1802 // findrunnable would return a G to run, handoffp must start 1803 // an M. 1804 1805 top: 1806 if sched.gcwaiting != 0 { 1807 gcstopm() 1808 goto top 1809 } 1810 if _g_.m.p.ptr().runSafePointFn != 0 { 1811 runSafePointFn() 1812 } 1813 if fingwait && fingwake { 1814 if gp := wakefing(); gp != nil { 1815 ready(gp, 0) 1816 } 1817 } 1818 1819 // local runq 1820 if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil { 1821 return gp, inheritTime 1822 } 1823 1824 // global runq 1825 if sched.runqsize != 0 { 1826 lock(&sched.lock) 1827 gp := globrunqget(_g_.m.p.ptr(), 0) 1828 unlock(&sched.lock) 1829 if gp != nil { 1830 return gp, false 1831 } 1832 } 1833 1834 // Poll network. 1835 // This netpoll is only an optimization before we resort to stealing. 1836 // We can safely skip it if there a thread blocked in netpoll already. 1837 // If there is any kind of logical race with that blocked thread 1838 // (e.g. it has already returned from netpoll, but does not set lastpoll yet), 1839 // this thread will do blocking netpoll below anyway. 1840 if netpollinited() && sched.lastpoll != 0 { 1841 if gp := netpoll(false); gp != nil { // non-blocking 1842 // netpoll returns list of goroutines linked by schedlink. 1843 injectglist(gp.schedlink.ptr()) 1844 casgstatus(gp, _Gwaiting, _Grunnable) 1845 if trace.enabled { 1846 traceGoUnpark(gp, 0) 1847 } 1848 return gp, false 1849 } 1850 } 1851 1852 // If number of spinning M's >= number of busy P's, block. 1853 // This is necessary to prevent excessive CPU consumption 1854 // when GOMAXPROCS>>1 but the program parallelism is low. 1855 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic 1856 goto stop 1857 } 1858 if !_g_.m.spinning { 1859 _g_.m.spinning = true 1860 atomic.Xadd(&sched.nmspinning, 1) 1861 } 1862 // random steal from other P's 1863 for i := 0; i < int(4*gomaxprocs); i++ { 1864 if sched.gcwaiting != 0 { 1865 goto top 1866 } 1867 _p_ := allp[fastrand1()%uint32(gomaxprocs)] 1868 var gp *g 1869 if _p_ == _g_.m.p.ptr() { 1870 gp, _ = runqget(_p_) 1871 } else { 1872 stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g 1873 gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG) 1874 } 1875 if gp != nil { 1876 return gp, false 1877 } 1878 } 1879 1880 stop: 1881 1882 // We have nothing to do. If we're in the GC mark phase, can 1883 // safely scan and blacken objects, and have work to do, run 1884 // idle-time marking rather than give up the P. 1885 if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 1886 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 1887 gp := _p_.gcBgMarkWorker.ptr() 1888 casgstatus(gp, _Gwaiting, _Grunnable) 1889 if trace.enabled { 1890 traceGoUnpark(gp, 0) 1891 } 1892 return gp, false 1893 } 1894 1895 // return P and block 1896 lock(&sched.lock) 1897 if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 { 1898 unlock(&sched.lock) 1899 goto top 1900 } 1901 if sched.runqsize != 0 { 1902 gp := globrunqget(_g_.m.p.ptr(), 0) 1903 unlock(&sched.lock) 1904 return gp, false 1905 } 1906 _p_ := releasep() 1907 pidleput(_p_) 1908 unlock(&sched.lock) 1909 1910 // Delicate dance: thread transitions from spinning to non-spinning state, 1911 // potentially concurrently with submission of new goroutines. We must 1912 // drop nmspinning first and then check all per-P queues again (with 1913 // #StoreLoad memory barrier in between). If we do it the other way around, 1914 // another thread can submit a goroutine after we've checked all run queues 1915 // but before we drop nmspinning; as the result nobody will unpark a thread 1916 // to run the goroutine. 1917 // If we discover new work below, we need to restore m.spinning as a signal 1918 // for resetspinning to unpark a new worker thread (because there can be more 1919 // than one starving goroutine). However, if after discovering new work 1920 // we also observe no idle Ps, it is OK to just park the current thread: 1921 // the system is fully loaded so no spinning threads are required. 1922 // Also see "Worker thread parking/unparking" comment at the top of the file. 1923 wasSpinning := _g_.m.spinning 1924 if _g_.m.spinning { 1925 _g_.m.spinning = false 1926 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1927 throw("findrunnable: negative nmspinning") 1928 } 1929 } 1930 1931 // check all runqueues once again 1932 for i := 0; i < int(gomaxprocs); i++ { 1933 _p_ := allp[i] 1934 if _p_ != nil && !runqempty(_p_) { 1935 lock(&sched.lock) 1936 _p_ = pidleget() 1937 unlock(&sched.lock) 1938 if _p_ != nil { 1939 acquirep(_p_) 1940 if wasSpinning { 1941 _g_.m.spinning = true 1942 atomic.Xadd(&sched.nmspinning, 1) 1943 } 1944 goto top 1945 } 1946 break 1947 } 1948 } 1949 1950 // poll network 1951 if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 1952 if _g_.m.p != 0 { 1953 throw("findrunnable: netpoll with p") 1954 } 1955 if _g_.m.spinning { 1956 throw("findrunnable: netpoll with spinning") 1957 } 1958 gp := netpoll(true) // block until new work is available 1959 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 1960 if gp != nil { 1961 lock(&sched.lock) 1962 _p_ = pidleget() 1963 unlock(&sched.lock) 1964 if _p_ != nil { 1965 acquirep(_p_) 1966 injectglist(gp.schedlink.ptr()) 1967 casgstatus(gp, _Gwaiting, _Grunnable) 1968 if trace.enabled { 1969 traceGoUnpark(gp, 0) 1970 } 1971 return gp, false 1972 } 1973 injectglist(gp) 1974 } 1975 } 1976 stopm() 1977 goto top 1978 } 1979 1980 func resetspinning() { 1981 _g_ := getg() 1982 if !_g_.m.spinning { 1983 throw("resetspinning: not a spinning m") 1984 } 1985 _g_.m.spinning = false 1986 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 1987 if int32(nmspinning) < 0 { 1988 throw("findrunnable: negative nmspinning") 1989 } 1990 // M wakeup policy is deliberately somewhat conservative, so check if we 1991 // need to wakeup another P here. See "Worker thread parking/unparking" 1992 // comment at the top of the file for details. 1993 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 1994 wakep() 1995 } 1996 } 1997 1998 // Injects the list of runnable G's into the scheduler. 1999 // Can run concurrently with GC. 2000 func injectglist(glist *g) { 2001 if glist == nil { 2002 return 2003 } 2004 if trace.enabled { 2005 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2006 traceGoUnpark(gp, 0) 2007 } 2008 } 2009 lock(&sched.lock) 2010 var n int 2011 for n = 0; glist != nil; n++ { 2012 gp := glist 2013 glist = gp.schedlink.ptr() 2014 casgstatus(gp, _Gwaiting, _Grunnable) 2015 globrunqput(gp) 2016 } 2017 unlock(&sched.lock) 2018 for ; n != 0 && sched.npidle != 0; n-- { 2019 startm(nil, false) 2020 } 2021 } 2022 2023 // One round of scheduler: find a runnable goroutine and execute it. 2024 // Never returns. 2025 func schedule() { 2026 _g_ := getg() 2027 2028 if _g_.m.locks != 0 { 2029 throw("schedule: holding locks") 2030 } 2031 2032 if _g_.m.lockedg != nil { 2033 stoplockedm() 2034 execute(_g_.m.lockedg, false) // Never returns. 2035 } 2036 2037 top: 2038 if sched.gcwaiting != 0 { 2039 gcstopm() 2040 goto top 2041 } 2042 if _g_.m.p.ptr().runSafePointFn != 0 { 2043 runSafePointFn() 2044 } 2045 2046 var gp *g 2047 var inheritTime bool 2048 if trace.enabled || trace.shutdown { 2049 gp = traceReader() 2050 if gp != nil { 2051 casgstatus(gp, _Gwaiting, _Grunnable) 2052 traceGoUnpark(gp, 0) 2053 } 2054 } 2055 if gp == nil && gcBlackenEnabled != 0 { 2056 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2057 } 2058 if gp == nil { 2059 // Check the global runnable queue once in a while to ensure fairness. 2060 // Otherwise two goroutines can completely occupy the local runqueue 2061 // by constantly respawning each other. 2062 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2063 lock(&sched.lock) 2064 gp = globrunqget(_g_.m.p.ptr(), 1) 2065 unlock(&sched.lock) 2066 } 2067 } 2068 if gp == nil { 2069 gp, inheritTime = runqget(_g_.m.p.ptr()) 2070 if gp != nil && _g_.m.spinning { 2071 throw("schedule: spinning with local work") 2072 } 2073 } 2074 if gp == nil { 2075 gp, inheritTime = findrunnable() // blocks until work is available 2076 } 2077 2078 // This thread is going to run a goroutine and is not spinning anymore, 2079 // so if it was marked as spinning we need to reset it now and potentially 2080 // start a new spinning M. 2081 if _g_.m.spinning { 2082 resetspinning() 2083 } 2084 2085 if gp.lockedm != nil { 2086 // Hands off own p to the locked m, 2087 // then blocks waiting for a new p. 2088 startlockedm(gp) 2089 goto top 2090 } 2091 2092 execute(gp, inheritTime) 2093 } 2094 2095 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2096 // Typically a caller sets gp's status away from Grunning and then 2097 // immediately calls dropg to finish the job. The caller is also responsible 2098 // for arranging that gp will be restarted using ready at an 2099 // appropriate time. After calling dropg and arranging for gp to be 2100 // readied later, the caller can do other work but eventually should 2101 // call schedule to restart the scheduling of goroutines on this m. 2102 func dropg() { 2103 _g_ := getg() 2104 2105 if _g_.m.lockedg == nil { 2106 _g_.m.curg.m = nil 2107 _g_.m.curg = nil 2108 } 2109 } 2110 2111 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2112 unlock((*mutex)(lock)) 2113 return true 2114 } 2115 2116 // park continuation on g0. 2117 func park_m(gp *g) { 2118 _g_ := getg() 2119 2120 if trace.enabled { 2121 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp) 2122 } 2123 2124 casgstatus(gp, _Grunning, _Gwaiting) 2125 dropg() 2126 2127 if _g_.m.waitunlockf != nil { 2128 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2129 ok := fn(gp, _g_.m.waitlock) 2130 _g_.m.waitunlockf = nil 2131 _g_.m.waitlock = nil 2132 if !ok { 2133 if trace.enabled { 2134 traceGoUnpark(gp, 2) 2135 } 2136 casgstatus(gp, _Gwaiting, _Grunnable) 2137 execute(gp, true) // Schedule it back, never returns. 2138 } 2139 } 2140 schedule() 2141 } 2142 2143 func goschedImpl(gp *g) { 2144 status := readgstatus(gp) 2145 if status&^_Gscan != _Grunning { 2146 dumpgstatus(gp) 2147 throw("bad g status") 2148 } 2149 casgstatus(gp, _Grunning, _Grunnable) 2150 dropg() 2151 lock(&sched.lock) 2152 globrunqput(gp) 2153 unlock(&sched.lock) 2154 2155 schedule() 2156 } 2157 2158 // Gosched continuation on g0. 2159 func gosched_m(gp *g) { 2160 if trace.enabled { 2161 traceGoSched() 2162 } 2163 goschedImpl(gp) 2164 } 2165 2166 func gopreempt_m(gp *g) { 2167 if trace.enabled { 2168 traceGoPreempt() 2169 } 2170 goschedImpl(gp) 2171 } 2172 2173 // Finishes execution of the current goroutine. 2174 func goexit1() { 2175 if raceenabled { 2176 racegoend() 2177 } 2178 if trace.enabled { 2179 traceGoEnd() 2180 } 2181 mcall(goexit0) 2182 } 2183 2184 // goexit continuation on g0. 2185 func goexit0(gp *g) { 2186 _g_ := getg() 2187 2188 casgstatus(gp, _Grunning, _Gdead) 2189 if isSystemGoroutine(gp) { 2190 atomic.Xadd(&sched.ngsys, -1) 2191 } 2192 gp.m = nil 2193 gp.lockedm = nil 2194 _g_.m.lockedg = nil 2195 gp.paniconfault = false 2196 gp._defer = nil // should be true already but just in case. 2197 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2198 gp.writebuf = nil 2199 gp.waitreason = "" 2200 gp.param = nil 2201 2202 dropg() 2203 2204 if _g_.m.locked&^_LockExternal != 0 { 2205 print("invalid m->locked = ", _g_.m.locked, "\n") 2206 throw("internal lockOSThread error") 2207 } 2208 _g_.m.locked = 0 2209 gfput(_g_.m.p.ptr(), gp) 2210 schedule() 2211 } 2212 2213 //go:nosplit 2214 //go:nowritebarrier 2215 func save(pc, sp uintptr) { 2216 _g_ := getg() 2217 2218 _g_.sched.pc = pc 2219 _g_.sched.sp = sp 2220 _g_.sched.lr = 0 2221 _g_.sched.ret = 0 2222 _g_.sched.ctxt = nil 2223 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2224 } 2225 2226 // The goroutine g is about to enter a system call. 2227 // Record that it's not using the cpu anymore. 2228 // This is called only from the go syscall library and cgocall, 2229 // not from the low-level system calls used by the runtime. 2230 // 2231 // Entersyscall cannot split the stack: the gosave must 2232 // make g->sched refer to the caller's stack segment, because 2233 // entersyscall is going to return immediately after. 2234 // 2235 // Nothing entersyscall calls can split the stack either. 2236 // We cannot safely move the stack during an active call to syscall, 2237 // because we do not know which of the uintptr arguments are 2238 // really pointers (back into the stack). 2239 // In practice, this means that we make the fast path run through 2240 // entersyscall doing no-split things, and the slow path has to use systemstack 2241 // to run bigger things on the system stack. 2242 // 2243 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2244 // saved SP and PC are restored. This is needed when exitsyscall will be called 2245 // from a function further up in the call stack than the parent, as g->syscallsp 2246 // must always point to a valid stack frame. entersyscall below is the normal 2247 // entry point for syscalls, which obtains the SP and PC from the caller. 2248 // 2249 // Syscall tracing: 2250 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2251 // If the syscall does not block, that is it, we do not emit any other events. 2252 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2253 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2254 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2255 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2256 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2257 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2258 // and we wait for the increment before emitting traceGoSysExit. 2259 // Note that the increment is done even if tracing is not enabled, 2260 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2261 // 2262 //go:nosplit 2263 func reentersyscall(pc, sp uintptr) { 2264 _g_ := getg() 2265 2266 // Disable preemption because during this function g is in Gsyscall status, 2267 // but can have inconsistent g->sched, do not let GC observe it. 2268 _g_.m.locks++ 2269 2270 // Entersyscall must not call any function that might split/grow the stack. 2271 // (See details in comment above.) 2272 // Catch calls that might, by replacing the stack guard with something that 2273 // will trip any stack check and leaving a flag to tell newstack to die. 2274 _g_.stackguard0 = stackPreempt 2275 _g_.throwsplit = true 2276 2277 // Leave SP around for GC and traceback. 2278 save(pc, sp) 2279 _g_.syscallsp = sp 2280 _g_.syscallpc = pc 2281 casgstatus(_g_, _Grunning, _Gsyscall) 2282 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2283 systemstack(func() { 2284 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2285 throw("entersyscall") 2286 }) 2287 } 2288 2289 if trace.enabled { 2290 systemstack(traceGoSysCall) 2291 // systemstack itself clobbers g.sched.{pc,sp} and we might 2292 // need them later when the G is genuinely blocked in a 2293 // syscall 2294 save(pc, sp) 2295 } 2296 2297 if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic 2298 systemstack(entersyscall_sysmon) 2299 save(pc, sp) 2300 } 2301 2302 if _g_.m.p.ptr().runSafePointFn != 0 { 2303 // runSafePointFn may stack split if run on this stack 2304 systemstack(runSafePointFn) 2305 save(pc, sp) 2306 } 2307 2308 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2309 _g_.sysblocktraced = true 2310 _g_.m.mcache = nil 2311 _g_.m.p.ptr().m = 0 2312 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2313 if sched.gcwaiting != 0 { 2314 systemstack(entersyscall_gcwait) 2315 save(pc, sp) 2316 } 2317 2318 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2319 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2320 // Morestack detects this case and throws. 2321 _g_.stackguard0 = stackPreempt 2322 _g_.m.locks-- 2323 } 2324 2325 // Standard syscall entry used by the go syscall library and normal cgo calls. 2326 //go:nosplit 2327 func entersyscall(dummy int32) { 2328 reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2329 } 2330 2331 func entersyscall_sysmon() { 2332 lock(&sched.lock) 2333 if atomic.Load(&sched.sysmonwait) != 0 { 2334 atomic.Store(&sched.sysmonwait, 0) 2335 notewakeup(&sched.sysmonnote) 2336 } 2337 unlock(&sched.lock) 2338 } 2339 2340 func entersyscall_gcwait() { 2341 _g_ := getg() 2342 _p_ := _g_.m.p.ptr() 2343 2344 lock(&sched.lock) 2345 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2346 if trace.enabled { 2347 traceGoSysBlock(_p_) 2348 traceProcStop(_p_) 2349 } 2350 _p_.syscalltick++ 2351 if sched.stopwait--; sched.stopwait == 0 { 2352 notewakeup(&sched.stopnote) 2353 } 2354 } 2355 unlock(&sched.lock) 2356 } 2357 2358 // The same as entersyscall(), but with a hint that the syscall is blocking. 2359 //go:nosplit 2360 func entersyscallblock(dummy int32) { 2361 _g_ := getg() 2362 2363 _g_.m.locks++ // see comment in entersyscall 2364 _g_.throwsplit = true 2365 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2366 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2367 _g_.sysblocktraced = true 2368 _g_.m.p.ptr().syscalltick++ 2369 2370 // Leave SP around for GC and traceback. 2371 pc := getcallerpc(unsafe.Pointer(&dummy)) 2372 sp := getcallersp(unsafe.Pointer(&dummy)) 2373 save(pc, sp) 2374 _g_.syscallsp = _g_.sched.sp 2375 _g_.syscallpc = _g_.sched.pc 2376 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2377 sp1 := sp 2378 sp2 := _g_.sched.sp 2379 sp3 := _g_.syscallsp 2380 systemstack(func() { 2381 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2382 throw("entersyscallblock") 2383 }) 2384 } 2385 casgstatus(_g_, _Grunning, _Gsyscall) 2386 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2387 systemstack(func() { 2388 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2389 throw("entersyscallblock") 2390 }) 2391 } 2392 2393 systemstack(entersyscallblock_handoff) 2394 2395 // Resave for traceback during blocked call. 2396 save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy))) 2397 2398 _g_.m.locks-- 2399 } 2400 2401 func entersyscallblock_handoff() { 2402 if trace.enabled { 2403 traceGoSysCall() 2404 traceGoSysBlock(getg().m.p.ptr()) 2405 } 2406 handoffp(releasep()) 2407 } 2408 2409 // The goroutine g exited its system call. 2410 // Arrange for it to run on a cpu again. 2411 // This is called only from the go syscall library, not 2412 // from the low-level system calls used by the 2413 //go:nosplit 2414 func exitsyscall(dummy int32) { 2415 _g_ := getg() 2416 2417 _g_.m.locks++ // see comment in entersyscall 2418 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2419 throw("exitsyscall: syscall frame is no longer valid") 2420 } 2421 2422 _g_.waitsince = 0 2423 oldp := _g_.m.p.ptr() 2424 if exitsyscallfast() { 2425 if _g_.m.mcache == nil { 2426 throw("lost mcache") 2427 } 2428 if trace.enabled { 2429 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2430 systemstack(traceGoStart) 2431 } 2432 } 2433 // There's a cpu for us, so we can run. 2434 _g_.m.p.ptr().syscalltick++ 2435 // We need to cas the status and scan before resuming... 2436 casgstatus(_g_, _Gsyscall, _Grunning) 2437 2438 // Garbage collector isn't running (since we are), 2439 // so okay to clear syscallsp. 2440 _g_.syscallsp = 0 2441 _g_.m.locks-- 2442 if _g_.preempt { 2443 // restore the preemption request in case we've cleared it in newstack 2444 _g_.stackguard0 = stackPreempt 2445 } else { 2446 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 2447 _g_.stackguard0 = _g_.stack.lo + _StackGuard 2448 } 2449 _g_.throwsplit = false 2450 return 2451 } 2452 2453 _g_.sysexitticks = 0 2454 _g_.sysexitseq = 0 2455 if trace.enabled { 2456 // Wait till traceGoSysBlock event is emitted. 2457 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2458 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 2459 osyield() 2460 } 2461 // We can't trace syscall exit right now because we don't have a P. 2462 // Tracing code can invoke write barriers that cannot run without a P. 2463 // So instead we remember the syscall exit time and emit the event 2464 // in execute when we have a P. 2465 _g_.sysexitseq, _g_.sysexitticks = tracestamp() 2466 } 2467 2468 _g_.m.locks-- 2469 2470 // Call the scheduler. 2471 mcall(exitsyscall0) 2472 2473 if _g_.m.mcache == nil { 2474 throw("lost mcache") 2475 } 2476 2477 // Scheduler returned, so we're allowed to run now. 2478 // Delete the syscallsp information that we left for 2479 // the garbage collector during the system call. 2480 // Must wait until now because until gosched returns 2481 // we don't know for sure that the garbage collector 2482 // is not running. 2483 _g_.syscallsp = 0 2484 _g_.m.p.ptr().syscalltick++ 2485 _g_.throwsplit = false 2486 } 2487 2488 //go:nosplit 2489 func exitsyscallfast() bool { 2490 _g_ := getg() 2491 2492 // Freezetheworld sets stopwait but does not retake P's. 2493 if sched.stopwait == freezeStopWait { 2494 _g_.m.mcache = nil 2495 _g_.m.p = 0 2496 return false 2497 } 2498 2499 // Try to re-acquire the last P. 2500 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 2501 // There's a cpu for us, so we can run. 2502 _g_.m.mcache = _g_.m.p.ptr().mcache 2503 _g_.m.p.ptr().m.set(_g_.m) 2504 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2505 if trace.enabled { 2506 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 2507 // traceGoSysBlock for this syscall was already emitted, 2508 // but here we effectively retake the p from the new syscall running on the same p. 2509 systemstack(func() { 2510 // Denote blocking of the new syscall. 2511 traceGoSysBlock(_g_.m.p.ptr()) 2512 // Denote completion of the current syscall. 2513 traceGoSysExit(tracestamp()) 2514 }) 2515 } 2516 _g_.m.p.ptr().syscalltick++ 2517 } 2518 return true 2519 } 2520 2521 // Try to get any other idle P. 2522 oldp := _g_.m.p.ptr() 2523 _g_.m.mcache = nil 2524 _g_.m.p = 0 2525 if sched.pidle != 0 { 2526 var ok bool 2527 systemstack(func() { 2528 ok = exitsyscallfast_pidle() 2529 if ok && trace.enabled { 2530 if oldp != nil { 2531 // Wait till traceGoSysBlock event is emitted. 2532 // This ensures consistency of the trace (the goroutine is started after it is blocked). 2533 for oldp.syscalltick == _g_.m.syscalltick { 2534 osyield() 2535 } 2536 } 2537 traceGoSysExit(tracestamp()) 2538 } 2539 }) 2540 if ok { 2541 return true 2542 } 2543 } 2544 return false 2545 } 2546 2547 func exitsyscallfast_pidle() bool { 2548 lock(&sched.lock) 2549 _p_ := pidleget() 2550 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 2551 atomic.Store(&sched.sysmonwait, 0) 2552 notewakeup(&sched.sysmonnote) 2553 } 2554 unlock(&sched.lock) 2555 if _p_ != nil { 2556 acquirep(_p_) 2557 return true 2558 } 2559 return false 2560 } 2561 2562 // exitsyscall slow path on g0. 2563 // Failed to acquire P, enqueue gp as runnable. 2564 func exitsyscall0(gp *g) { 2565 _g_ := getg() 2566 2567 casgstatus(gp, _Gsyscall, _Grunnable) 2568 dropg() 2569 lock(&sched.lock) 2570 _p_ := pidleget() 2571 if _p_ == nil { 2572 globrunqput(gp) 2573 } else if atomic.Load(&sched.sysmonwait) != 0 { 2574 atomic.Store(&sched.sysmonwait, 0) 2575 notewakeup(&sched.sysmonnote) 2576 } 2577 unlock(&sched.lock) 2578 if _p_ != nil { 2579 acquirep(_p_) 2580 execute(gp, false) // Never returns. 2581 } 2582 if _g_.m.lockedg != nil { 2583 // Wait until another thread schedules gp and so m again. 2584 stoplockedm() 2585 execute(gp, false) // Never returns. 2586 } 2587 stopm() 2588 schedule() // Never returns. 2589 } 2590 2591 func beforefork() { 2592 gp := getg().m.curg 2593 2594 // Fork can hang if preempted with signals frequently enough (see issue 5517). 2595 // Ensure that we stay on the same M where we disable profiling. 2596 gp.m.locks++ 2597 if gp.m.profilehz != 0 { 2598 resetcpuprofiler(0) 2599 } 2600 2601 // This function is called before fork in syscall package. 2602 // Code between fork and exec must not allocate memory nor even try to grow stack. 2603 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 2604 // runtime_AfterFork will undo this in parent process, but not in child. 2605 gp.stackguard0 = stackFork 2606 } 2607 2608 // Called from syscall package before fork. 2609 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 2610 //go:nosplit 2611 func syscall_runtime_BeforeFork() { 2612 systemstack(beforefork) 2613 } 2614 2615 func afterfork() { 2616 gp := getg().m.curg 2617 2618 // See the comment in beforefork. 2619 gp.stackguard0 = gp.stack.lo + _StackGuard 2620 2621 hz := sched.profilehz 2622 if hz != 0 { 2623 resetcpuprofiler(hz) 2624 } 2625 gp.m.locks-- 2626 } 2627 2628 // Called from syscall package after fork in parent. 2629 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 2630 //go:nosplit 2631 func syscall_runtime_AfterFork() { 2632 systemstack(afterfork) 2633 } 2634 2635 // Allocate a new g, with a stack big enough for stacksize bytes. 2636 func malg(stacksize int32) *g { 2637 newg := new(g) 2638 if stacksize >= 0 { 2639 stacksize = round2(_StackSystem + stacksize) 2640 systemstack(func() { 2641 newg.stack, newg.stkbar = stackalloc(uint32(stacksize)) 2642 }) 2643 newg.stackguard0 = newg.stack.lo + _StackGuard 2644 newg.stackguard1 = ^uintptr(0) 2645 newg.stackAlloc = uintptr(stacksize) 2646 } 2647 return newg 2648 } 2649 2650 // Create a new g running fn with siz bytes of arguments. 2651 // Put it on the queue of g's waiting to run. 2652 // The compiler turns a go statement into a call to this. 2653 // Cannot split the stack because it assumes that the arguments 2654 // are available sequentially after &fn; they would not be 2655 // copied if a stack split occurred. 2656 //go:nosplit 2657 func newproc(siz int32, fn *funcval) { 2658 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 2659 pc := getcallerpc(unsafe.Pointer(&siz)) 2660 systemstack(func() { 2661 newproc1(fn, (*uint8)(argp), siz, 0, pc) 2662 }) 2663 } 2664 2665 // Create a new g running fn with narg bytes of arguments starting 2666 // at argp and returning nret bytes of results. callerpc is the 2667 // address of the go statement that created this. The new g is put 2668 // on the queue of g's waiting to run. 2669 func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g { 2670 _g_ := getg() 2671 2672 if fn == nil { 2673 _g_.m.throwing = -1 // do not dump full stacks 2674 throw("go of nil func value") 2675 } 2676 _g_.m.locks++ // disable preemption because it can be holding p in a local var 2677 siz := narg + nret 2678 siz = (siz + 7) &^ 7 2679 2680 // We could allocate a larger initial stack if necessary. 2681 // Not worth it: this is almost always an error. 2682 // 4*sizeof(uintreg): extra space added below 2683 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 2684 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 2685 throw("newproc: function arguments too large for new goroutine") 2686 } 2687 2688 _p_ := _g_.m.p.ptr() 2689 newg := gfget(_p_) 2690 if newg == nil { 2691 newg = malg(_StackMin) 2692 casgstatus(newg, _Gidle, _Gdead) 2693 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 2694 } 2695 if newg.stack.hi == 0 { 2696 throw("newproc1: newg missing stack") 2697 } 2698 2699 if readgstatus(newg) != _Gdead { 2700 throw("newproc1: new g is not Gdead") 2701 } 2702 2703 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 2704 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 2705 sp := newg.stack.hi - totalSize 2706 spArg := sp 2707 if usesLR { 2708 // caller's LR 2709 *(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil 2710 prepGoExitFrame(sp) 2711 spArg += sys.MinFrameSize 2712 } 2713 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 2714 2715 memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 2716 newg.sched.sp = sp 2717 newg.stktopsp = sp 2718 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 2719 newg.sched.g = guintptr(unsafe.Pointer(newg)) 2720 gostartcallfn(&newg.sched, fn) 2721 newg.gopc = callerpc 2722 newg.startpc = fn.fn 2723 if isSystemGoroutine(newg) { 2724 atomic.Xadd(&sched.ngsys, +1) 2725 } 2726 casgstatus(newg, _Gdead, _Grunnable) 2727 2728 if _p_.goidcache == _p_.goidcacheend { 2729 // Sched.goidgen is the last allocated id, 2730 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 2731 // At startup sched.goidgen=0, so main goroutine receives goid=1. 2732 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 2733 _p_.goidcache -= _GoidCacheBatch - 1 2734 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 2735 } 2736 newg.goid = int64(_p_.goidcache) 2737 _p_.goidcache++ 2738 if raceenabled { 2739 newg.racectx = racegostart(callerpc) 2740 } 2741 if trace.enabled { 2742 traceGoCreate(newg, newg.startpc) 2743 } 2744 runqput(_p_, newg, true) 2745 2746 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic 2747 wakep() 2748 } 2749 _g_.m.locks-- 2750 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 2751 _g_.stackguard0 = stackPreempt 2752 } 2753 return newg 2754 } 2755 2756 // Put on gfree list. 2757 // If local list is too long, transfer a batch to the global list. 2758 func gfput(_p_ *p, gp *g) { 2759 if readgstatus(gp) != _Gdead { 2760 throw("gfput: bad status (not Gdead)") 2761 } 2762 2763 stksize := gp.stackAlloc 2764 2765 if stksize != _FixedStack { 2766 // non-standard stack size - free it. 2767 stackfree(gp.stack, gp.stackAlloc) 2768 gp.stack.lo = 0 2769 gp.stack.hi = 0 2770 gp.stackguard0 = 0 2771 gp.stkbar = nil 2772 gp.stkbarPos = 0 2773 } else { 2774 // Reset stack barriers. 2775 gp.stkbar = gp.stkbar[:0] 2776 gp.stkbarPos = 0 2777 } 2778 2779 gp.schedlink.set(_p_.gfree) 2780 _p_.gfree = gp 2781 _p_.gfreecnt++ 2782 if _p_.gfreecnt >= 64 { 2783 lock(&sched.gflock) 2784 for _p_.gfreecnt >= 32 { 2785 _p_.gfreecnt-- 2786 gp = _p_.gfree 2787 _p_.gfree = gp.schedlink.ptr() 2788 gp.schedlink.set(sched.gfree) 2789 sched.gfree = gp 2790 sched.ngfree++ 2791 } 2792 unlock(&sched.gflock) 2793 } 2794 } 2795 2796 // Get from gfree list. 2797 // If local list is empty, grab a batch from global list. 2798 func gfget(_p_ *p) *g { 2799 retry: 2800 gp := _p_.gfree 2801 if gp == nil && sched.gfree != nil { 2802 lock(&sched.gflock) 2803 for _p_.gfreecnt < 32 && sched.gfree != nil { 2804 _p_.gfreecnt++ 2805 gp = sched.gfree 2806 sched.gfree = gp.schedlink.ptr() 2807 sched.ngfree-- 2808 gp.schedlink.set(_p_.gfree) 2809 _p_.gfree = gp 2810 } 2811 unlock(&sched.gflock) 2812 goto retry 2813 } 2814 if gp != nil { 2815 _p_.gfree = gp.schedlink.ptr() 2816 _p_.gfreecnt-- 2817 if gp.stack.lo == 0 { 2818 // Stack was deallocated in gfput. Allocate a new one. 2819 systemstack(func() { 2820 gp.stack, gp.stkbar = stackalloc(_FixedStack) 2821 }) 2822 gp.stackguard0 = gp.stack.lo + _StackGuard 2823 gp.stackAlloc = _FixedStack 2824 } else { 2825 if raceenabled { 2826 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2827 } 2828 if msanenabled { 2829 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc) 2830 } 2831 } 2832 } 2833 return gp 2834 } 2835 2836 // Purge all cached G's from gfree list to the global list. 2837 func gfpurge(_p_ *p) { 2838 lock(&sched.gflock) 2839 for _p_.gfreecnt != 0 { 2840 _p_.gfreecnt-- 2841 gp := _p_.gfree 2842 _p_.gfree = gp.schedlink.ptr() 2843 gp.schedlink.set(sched.gfree) 2844 sched.gfree = gp 2845 sched.ngfree++ 2846 } 2847 unlock(&sched.gflock) 2848 } 2849 2850 // Breakpoint executes a breakpoint trap. 2851 func Breakpoint() { 2852 breakpoint() 2853 } 2854 2855 // dolockOSThread is called by LockOSThread and lockOSThread below 2856 // after they modify m.locked. Do not allow preemption during this call, 2857 // or else the m might be different in this function than in the caller. 2858 //go:nosplit 2859 func dolockOSThread() { 2860 _g_ := getg() 2861 _g_.m.lockedg = _g_ 2862 _g_.lockedm = _g_.m 2863 } 2864 2865 //go:nosplit 2866 2867 // LockOSThread wires the calling goroutine to its current operating system thread. 2868 // Until the calling goroutine exits or calls UnlockOSThread, it will always 2869 // execute in that thread, and no other goroutine can. 2870 func LockOSThread() { 2871 getg().m.locked |= _LockExternal 2872 dolockOSThread() 2873 } 2874 2875 //go:nosplit 2876 func lockOSThread() { 2877 getg().m.locked += _LockInternal 2878 dolockOSThread() 2879 } 2880 2881 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 2882 // after they update m->locked. Do not allow preemption during this call, 2883 // or else the m might be in different in this function than in the caller. 2884 //go:nosplit 2885 func dounlockOSThread() { 2886 _g_ := getg() 2887 if _g_.m.locked != 0 { 2888 return 2889 } 2890 _g_.m.lockedg = nil 2891 _g_.lockedm = nil 2892 } 2893 2894 //go:nosplit 2895 2896 // UnlockOSThread unwires the calling goroutine from its fixed operating system thread. 2897 // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op. 2898 func UnlockOSThread() { 2899 getg().m.locked &^= _LockExternal 2900 dounlockOSThread() 2901 } 2902 2903 //go:nosplit 2904 func unlockOSThread() { 2905 _g_ := getg() 2906 if _g_.m.locked < _LockInternal { 2907 systemstack(badunlockosthread) 2908 } 2909 _g_.m.locked -= _LockInternal 2910 dounlockOSThread() 2911 } 2912 2913 func badunlockosthread() { 2914 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 2915 } 2916 2917 func gcount() int32 { 2918 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 2919 for i := 0; ; i++ { 2920 _p_ := allp[i] 2921 if _p_ == nil { 2922 break 2923 } 2924 n -= _p_.gfreecnt 2925 } 2926 2927 // All these variables can be changed concurrently, so the result can be inconsistent. 2928 // But at least the current goroutine is running. 2929 if n < 1 { 2930 n = 1 2931 } 2932 return n 2933 } 2934 2935 func mcount() int32 { 2936 return sched.mcount 2937 } 2938 2939 var prof struct { 2940 lock uint32 2941 hz int32 2942 } 2943 2944 func _System() { _System() } 2945 func _ExternalCode() { _ExternalCode() } 2946 func _GC() { _GC() } 2947 2948 // Called if we receive a SIGPROF signal. 2949 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 2950 if prof.hz == 0 { 2951 return 2952 } 2953 2954 // Profiling runs concurrently with GC, so it must not allocate. 2955 mp.mallocing++ 2956 2957 // Define that a "user g" is a user-created goroutine, and a "system g" 2958 // is one that is m->g0 or m->gsignal. 2959 // 2960 // We might be interrupted for profiling halfway through a 2961 // goroutine switch. The switch involves updating three (or four) values: 2962 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 2963 // because once it gets updated the new g is running. 2964 // 2965 // When switching from a user g to a system g, LR is not considered live, 2966 // so the update only affects g, SP, and PC. Since PC must be last, there 2967 // the possible partial transitions in ordinary execution are (1) g alone is updated, 2968 // (2) both g and SP are updated, and (3) SP alone is updated. 2969 // If SP or g alone is updated, we can detect the partial transition by checking 2970 // whether the SP is within g's stack bounds. (We could also require that SP 2971 // be changed only after g, but the stack bounds check is needed by other 2972 // cases, so there is no need to impose an additional requirement.) 2973 // 2974 // There is one exceptional transition to a system g, not in ordinary execution. 2975 // When a signal arrives, the operating system starts the signal handler running 2976 // with an updated PC and SP. The g is updated last, at the beginning of the 2977 // handler. There are two reasons this is okay. First, until g is updated the 2978 // g and SP do not match, so the stack bounds check detects the partial transition. 2979 // Second, signal handlers currently run with signals disabled, so a profiling 2980 // signal cannot arrive during the handler. 2981 // 2982 // When switching from a system g to a user g, there are three possibilities. 2983 // 2984 // First, it may be that the g switch has no PC update, because the SP 2985 // either corresponds to a user g throughout (as in asmcgocall) 2986 // or because it has been arranged to look like a user g frame 2987 // (as in cgocallback_gofunc). In this case, since the entire 2988 // transition is a g+SP update, a partial transition updating just one of 2989 // those will be detected by the stack bounds check. 2990 // 2991 // Second, when returning from a signal handler, the PC and SP updates 2992 // are performed by the operating system in an atomic update, so the g 2993 // update must be done before them. The stack bounds check detects 2994 // the partial transition here, and (again) signal handlers run with signals 2995 // disabled, so a profiling signal cannot arrive then anyway. 2996 // 2997 // Third, the common case: it may be that the switch updates g, SP, and PC 2998 // separately. If the PC is within any of the functions that does this, 2999 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3000 // 3001 // There is another apparently viable approach, recorded here in case 3002 // the "PC within setsSP function" check turns out not to be usable. 3003 // It would be possible to delay the update of either g or SP until immediately 3004 // before the PC update instruction. Then, because of the stack bounds check, 3005 // the only problematic interrupt point is just before that PC update instruction, 3006 // and the sigprof handler can detect that instruction and simulate stepping past 3007 // it in order to reach a consistent state. On ARM, the update of g must be made 3008 // in two places (in R10 and also in a TLS slot), so the delayed update would 3009 // need to be the SP update. The sigprof handler must read the instruction at 3010 // the current PC and if it was the known instruction (for example, JMP BX or 3011 // MOV R2, PC), use that other register in place of the PC value. 3012 // The biggest drawback to this solution is that it requires that we can tell 3013 // whether it's safe to read from the memory pointed at by PC. 3014 // In a correct program, we can test PC == nil and otherwise read, 3015 // but if a profiling signal happens at the instant that a program executes 3016 // a bad jump (before the program manages to handle the resulting fault) 3017 // the profiling handler could fault trying to read nonexistent memory. 3018 // 3019 // To recap, there are no constraints on the assembly being used for the 3020 // transition. We simply require that g and SP match and that the PC is not 3021 // in gogo. 3022 traceback := true 3023 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3024 traceback = false 3025 } 3026 var stk [maxCPUProfStack]uintptr 3027 var haveStackLock *g 3028 n := 0 3029 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3030 // Cgo, we can't unwind and symbolize arbitrary C code, 3031 // so instead collect Go stack that leads to the cgo call. 3032 // This is especially important on windows, since all syscalls are cgo calls. 3033 if gcTryLockStackBarriers(mp.curg) { 3034 haveStackLock = mp.curg 3035 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0) 3036 } 3037 } else if traceback { 3038 var flags uint = _TraceTrap 3039 if gp.m.curg != nil && gcTryLockStackBarriers(gp.m.curg) { 3040 // It's safe to traceback the user stack. 3041 haveStackLock = gp.m.curg 3042 flags |= _TraceJumpStack 3043 } 3044 // Traceback is safe if we're on the system stack (if 3045 // necessary, flags will stop it before switching to 3046 // the user stack), or if we locked the user stack. 3047 if gp != gp.m.curg || haveStackLock != nil { 3048 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags) 3049 } 3050 } 3051 if haveStackLock != nil { 3052 gcUnlockStackBarriers(haveStackLock) 3053 } 3054 3055 if n <= 0 { 3056 // Normal traceback is impossible or has failed. 3057 // See if it falls into several common cases. 3058 n = 0 3059 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3060 // Libcall, i.e. runtime syscall on windows. 3061 // Collect Go stack that leads to the call. 3062 if gcTryLockStackBarriers(mp.libcallg.ptr()) { 3063 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3064 gcUnlockStackBarriers(mp.libcallg.ptr()) 3065 } 3066 } 3067 if n == 0 { 3068 // If all of the above has failed, account it against abstract "System" or "GC". 3069 n = 2 3070 // "ExternalCode" is better than "etext". 3071 if pc > firstmoduledata.etext { 3072 pc = funcPC(_ExternalCode) + sys.PCQuantum 3073 } 3074 stk[0] = pc 3075 if mp.preemptoff != "" || mp.helpgc != 0 { 3076 stk[1] = funcPC(_GC) + sys.PCQuantum 3077 } else { 3078 stk[1] = funcPC(_System) + sys.PCQuantum 3079 } 3080 } 3081 } 3082 3083 if prof.hz != 0 { 3084 // Simple cas-lock to coordinate with setcpuprofilerate. 3085 for !atomic.Cas(&prof.lock, 0, 1) { 3086 osyield() 3087 } 3088 if prof.hz != 0 { 3089 cpuprof.add(stk[:n]) 3090 } 3091 atomic.Store(&prof.lock, 0) 3092 } 3093 mp.mallocing-- 3094 } 3095 3096 // Reports whether a function will set the SP 3097 // to an absolute value. Important that 3098 // we don't traceback when these are at the bottom 3099 // of the stack since we can't be sure that we will 3100 // find the caller. 3101 // 3102 // If the function is not on the bottom of the stack 3103 // we assume that it will have set it up so that traceback will be consistent, 3104 // either by being a traceback terminating function 3105 // or putting one on the stack at the right offset. 3106 func setsSP(pc uintptr) bool { 3107 f := findfunc(pc) 3108 if f == nil { 3109 // couldn't find the function for this PC, 3110 // so assume the worst and stop traceback 3111 return true 3112 } 3113 switch f.entry { 3114 case gogoPC, systemstackPC, mcallPC, morestackPC: 3115 return true 3116 } 3117 return false 3118 } 3119 3120 // Arrange to call fn with a traceback hz times a second. 3121 func setcpuprofilerate_m(hz int32) { 3122 // Force sane arguments. 3123 if hz < 0 { 3124 hz = 0 3125 } 3126 3127 // Disable preemption, otherwise we can be rescheduled to another thread 3128 // that has profiling enabled. 3129 _g_ := getg() 3130 _g_.m.locks++ 3131 3132 // Stop profiler on this thread so that it is safe to lock prof. 3133 // if a profiling signal came in while we had prof locked, 3134 // it would deadlock. 3135 resetcpuprofiler(0) 3136 3137 for !atomic.Cas(&prof.lock, 0, 1) { 3138 osyield() 3139 } 3140 prof.hz = hz 3141 atomic.Store(&prof.lock, 0) 3142 3143 lock(&sched.lock) 3144 sched.profilehz = hz 3145 unlock(&sched.lock) 3146 3147 if hz != 0 { 3148 resetcpuprofiler(hz) 3149 } 3150 3151 _g_.m.locks-- 3152 } 3153 3154 // Change number of processors. The world is stopped, sched is locked. 3155 // gcworkbufs are not being modified by either the GC or 3156 // the write barrier code. 3157 // Returns list of Ps with local work, they need to be scheduled by the caller. 3158 func procresize(nprocs int32) *p { 3159 old := gomaxprocs 3160 if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs { 3161 throw("procresize: invalid arg") 3162 } 3163 if trace.enabled { 3164 traceGomaxprocs(nprocs) 3165 } 3166 3167 // update statistics 3168 now := nanotime() 3169 if sched.procresizetime != 0 { 3170 sched.totaltime += int64(old) * (now - sched.procresizetime) 3171 } 3172 sched.procresizetime = now 3173 3174 // initialize new P's 3175 for i := int32(0); i < nprocs; i++ { 3176 pp := allp[i] 3177 if pp == nil { 3178 pp = new(p) 3179 pp.id = i 3180 pp.status = _Pgcstop 3181 pp.sudogcache = pp.sudogbuf[:0] 3182 for i := range pp.deferpool { 3183 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3184 } 3185 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3186 } 3187 if pp.mcache == nil { 3188 if old == 0 && i == 0 { 3189 if getg().m.mcache == nil { 3190 throw("missing mcache?") 3191 } 3192 pp.mcache = getg().m.mcache // bootstrap 3193 } else { 3194 pp.mcache = allocmcache() 3195 } 3196 } 3197 } 3198 3199 // free unused P's 3200 for i := nprocs; i < old; i++ { 3201 p := allp[i] 3202 if trace.enabled { 3203 if p == getg().m.p.ptr() { 3204 // moving to p[0], pretend that we were descheduled 3205 // and then scheduled again to keep the trace sane. 3206 traceGoSched() 3207 traceProcStop(p) 3208 } 3209 } 3210 // move all runnable goroutines to the global queue 3211 for p.runqhead != p.runqtail { 3212 // pop from tail of local queue 3213 p.runqtail-- 3214 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3215 // push onto head of global queue 3216 globrunqputhead(gp) 3217 } 3218 if p.runnext != 0 { 3219 globrunqputhead(p.runnext.ptr()) 3220 p.runnext = 0 3221 } 3222 // if there's a background worker, make it runnable and put 3223 // it on the global queue so it can clean itself up 3224 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3225 casgstatus(gp, _Gwaiting, _Grunnable) 3226 if trace.enabled { 3227 traceGoUnpark(gp, 0) 3228 } 3229 globrunqput(gp) 3230 // This assignment doesn't race because the 3231 // world is stopped. 3232 p.gcBgMarkWorker.set(nil) 3233 } 3234 for i := range p.sudogbuf { 3235 p.sudogbuf[i] = nil 3236 } 3237 p.sudogcache = p.sudogbuf[:0] 3238 for i := range p.deferpool { 3239 for j := range p.deferpoolbuf[i] { 3240 p.deferpoolbuf[i][j] = nil 3241 } 3242 p.deferpool[i] = p.deferpoolbuf[i][:0] 3243 } 3244 freemcache(p.mcache) 3245 p.mcache = nil 3246 gfpurge(p) 3247 traceProcFree(p) 3248 p.status = _Pdead 3249 // can't free P itself because it can be referenced by an M in syscall 3250 } 3251 3252 _g_ := getg() 3253 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 3254 // continue to use the current P 3255 _g_.m.p.ptr().status = _Prunning 3256 } else { 3257 // release the current P and acquire allp[0] 3258 if _g_.m.p != 0 { 3259 _g_.m.p.ptr().m = 0 3260 } 3261 _g_.m.p = 0 3262 _g_.m.mcache = nil 3263 p := allp[0] 3264 p.m = 0 3265 p.status = _Pidle 3266 acquirep(p) 3267 if trace.enabled { 3268 traceGoStart() 3269 } 3270 } 3271 var runnablePs *p 3272 for i := nprocs - 1; i >= 0; i-- { 3273 p := allp[i] 3274 if _g_.m.p.ptr() == p { 3275 continue 3276 } 3277 p.status = _Pidle 3278 if runqempty(p) { 3279 pidleput(p) 3280 } else { 3281 p.m.set(mget()) 3282 p.link.set(runnablePs) 3283 runnablePs = p 3284 } 3285 } 3286 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 3287 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 3288 return runnablePs 3289 } 3290 3291 // Associate p and the current m. 3292 func acquirep(_p_ *p) { 3293 acquirep1(_p_) 3294 3295 // have p; write barriers now allowed 3296 _g_ := getg() 3297 _g_.m.mcache = _p_.mcache 3298 3299 if trace.enabled { 3300 traceProcStart() 3301 } 3302 } 3303 3304 // May run during STW, so write barriers are not allowed. 3305 //go:nowritebarrier 3306 func acquirep1(_p_ *p) { 3307 _g_ := getg() 3308 3309 if _g_.m.p != 0 || _g_.m.mcache != nil { 3310 throw("acquirep: already in go") 3311 } 3312 if _p_.m != 0 || _p_.status != _Pidle { 3313 id := int32(0) 3314 if _p_.m != 0 { 3315 id = _p_.m.ptr().id 3316 } 3317 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 3318 throw("acquirep: invalid p state") 3319 } 3320 _g_.m.p.set(_p_) 3321 _p_.m.set(_g_.m) 3322 _p_.status = _Prunning 3323 } 3324 3325 // Disassociate p and the current m. 3326 func releasep() *p { 3327 _g_ := getg() 3328 3329 if _g_.m.p == 0 || _g_.m.mcache == nil { 3330 throw("releasep: invalid arg") 3331 } 3332 _p_ := _g_.m.p.ptr() 3333 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 3334 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 3335 throw("releasep: invalid p state") 3336 } 3337 if trace.enabled { 3338 traceProcStop(_g_.m.p.ptr()) 3339 } 3340 _g_.m.p = 0 3341 _g_.m.mcache = nil 3342 _p_.m = 0 3343 _p_.status = _Pidle 3344 return _p_ 3345 } 3346 3347 func incidlelocked(v int32) { 3348 lock(&sched.lock) 3349 sched.nmidlelocked += v 3350 if v > 0 { 3351 checkdead() 3352 } 3353 unlock(&sched.lock) 3354 } 3355 3356 // Check for deadlock situation. 3357 // The check is based on number of running M's, if 0 -> deadlock. 3358 func checkdead() { 3359 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 3360 // there are no running goroutines. The calling program is 3361 // assumed to be running. 3362 if islibrary || isarchive { 3363 return 3364 } 3365 3366 // If we are dying because of a signal caught on an already idle thread, 3367 // freezetheworld will cause all running threads to block. 3368 // And runtime will essentially enter into deadlock state, 3369 // except that there is a thread that will call exit soon. 3370 if panicking > 0 { 3371 return 3372 } 3373 3374 // -1 for sysmon 3375 run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1 3376 if run > 0 { 3377 return 3378 } 3379 if run < 0 { 3380 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n") 3381 throw("checkdead: inconsistent counts") 3382 } 3383 3384 grunning := 0 3385 lock(&allglock) 3386 for i := 0; i < len(allgs); i++ { 3387 gp := allgs[i] 3388 if isSystemGoroutine(gp) { 3389 continue 3390 } 3391 s := readgstatus(gp) 3392 switch s &^ _Gscan { 3393 case _Gwaiting: 3394 grunning++ 3395 case _Grunnable, 3396 _Grunning, 3397 _Gsyscall: 3398 unlock(&allglock) 3399 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 3400 throw("checkdead: runnable g") 3401 } 3402 } 3403 unlock(&allglock) 3404 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 3405 throw("no goroutines (main called runtime.Goexit) - deadlock!") 3406 } 3407 3408 // Maybe jump time forward for playground. 3409 gp := timejump() 3410 if gp != nil { 3411 casgstatus(gp, _Gwaiting, _Grunnable) 3412 globrunqput(gp) 3413 _p_ := pidleget() 3414 if _p_ == nil { 3415 throw("checkdead: no p for timer") 3416 } 3417 mp := mget() 3418 if mp == nil { 3419 // There should always be a free M since 3420 // nothing is running. 3421 throw("checkdead: no m for timer") 3422 } 3423 mp.nextp.set(_p_) 3424 notewakeup(&mp.park) 3425 return 3426 } 3427 3428 getg().m.throwing = -1 // do not dump full stacks 3429 throw("all goroutines are asleep - deadlock!") 3430 } 3431 3432 // forcegcperiod is the maximum time in nanoseconds between garbage 3433 // collections. If we go this long without a garbage collection, one 3434 // is forced to run. 3435 // 3436 // This is a variable for testing purposes. It normally doesn't change. 3437 var forcegcperiod int64 = 2 * 60 * 1e9 3438 3439 // Always runs without a P, so write barriers are not allowed. 3440 // 3441 //go:nowritebarrierrec 3442 func sysmon() { 3443 // If a heap span goes unused for 5 minutes after a garbage collection, 3444 // we hand it back to the operating system. 3445 scavengelimit := int64(5 * 60 * 1e9) 3446 3447 if debug.scavenge > 0 { 3448 // Scavenge-a-lot for testing. 3449 forcegcperiod = 10 * 1e6 3450 scavengelimit = 20 * 1e6 3451 } 3452 3453 lastscavenge := nanotime() 3454 nscavenge := 0 3455 3456 lasttrace := int64(0) 3457 idle := 0 // how many cycles in succession we had not wokeup somebody 3458 delay := uint32(0) 3459 for { 3460 if idle == 0 { // start with 20us sleep... 3461 delay = 20 3462 } else if idle > 50 { // start doubling the sleep after 1ms... 3463 delay *= 2 3464 } 3465 if delay > 10*1000 { // up to 10ms 3466 delay = 10 * 1000 3467 } 3468 usleep(delay) 3469 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic 3470 lock(&sched.lock) 3471 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 3472 atomic.Store(&sched.sysmonwait, 1) 3473 unlock(&sched.lock) 3474 // Make wake-up period small enough 3475 // for the sampling to be correct. 3476 maxsleep := forcegcperiod / 2 3477 if scavengelimit < forcegcperiod { 3478 maxsleep = scavengelimit / 2 3479 } 3480 notetsleep(&sched.sysmonnote, maxsleep) 3481 lock(&sched.lock) 3482 atomic.Store(&sched.sysmonwait, 0) 3483 noteclear(&sched.sysmonnote) 3484 idle = 0 3485 delay = 20 3486 } 3487 unlock(&sched.lock) 3488 } 3489 // poll network if not polled for more than 10ms 3490 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 3491 now := nanotime() 3492 unixnow := unixnanotime() 3493 if lastpoll != 0 && lastpoll+10*1000*1000 < now { 3494 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 3495 gp := netpoll(false) // non-blocking - returns list of goroutines 3496 if gp != nil { 3497 // Need to decrement number of idle locked M's 3498 // (pretending that one more is running) before injectglist. 3499 // Otherwise it can lead to the following situation: 3500 // injectglist grabs all P's but before it starts M's to run the P's, 3501 // another M returns from syscall, finishes running its G, 3502 // observes that there is no work to do and no other running M's 3503 // and reports deadlock. 3504 incidlelocked(-1) 3505 injectglist(gp) 3506 incidlelocked(1) 3507 } 3508 } 3509 // retake P's blocked in syscalls 3510 // and preempt long running G's 3511 if retake(now) != 0 { 3512 idle = 0 3513 } else { 3514 idle++ 3515 } 3516 // check if we need to force a GC 3517 lastgc := int64(atomic.Load64(&memstats.last_gc)) 3518 if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 { 3519 lock(&forcegc.lock) 3520 forcegc.idle = 0 3521 forcegc.g.schedlink = 0 3522 injectglist(forcegc.g) 3523 unlock(&forcegc.lock) 3524 } 3525 // scavenge heap once in a while 3526 if lastscavenge+scavengelimit/2 < now { 3527 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 3528 lastscavenge = now 3529 nscavenge++ 3530 } 3531 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 3532 lasttrace = now 3533 schedtrace(debug.scheddetail > 0) 3534 } 3535 } 3536 } 3537 3538 var pdesc [_MaxGomaxprocs]struct { 3539 schedtick uint32 3540 schedwhen int64 3541 syscalltick uint32 3542 syscallwhen int64 3543 } 3544 3545 // forcePreemptNS is the time slice given to a G before it is 3546 // preempted. 3547 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 3548 3549 func retake(now int64) uint32 { 3550 n := 0 3551 for i := int32(0); i < gomaxprocs; i++ { 3552 _p_ := allp[i] 3553 if _p_ == nil { 3554 continue 3555 } 3556 pd := &pdesc[i] 3557 s := _p_.status 3558 if s == _Psyscall { 3559 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 3560 t := int64(_p_.syscalltick) 3561 if int64(pd.syscalltick) != t { 3562 pd.syscalltick = uint32(t) 3563 pd.syscallwhen = now 3564 continue 3565 } 3566 // On the one hand we don't want to retake Ps if there is no other work to do, 3567 // but on the other hand we want to retake them eventually 3568 // because they can prevent the sysmon thread from deep sleep. 3569 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 3570 continue 3571 } 3572 // Need to decrement number of idle locked M's 3573 // (pretending that one more is running) before the CAS. 3574 // Otherwise the M from which we retake can exit the syscall, 3575 // increment nmidle and report deadlock. 3576 incidlelocked(-1) 3577 if atomic.Cas(&_p_.status, s, _Pidle) { 3578 if trace.enabled { 3579 traceGoSysBlock(_p_) 3580 traceProcStop(_p_) 3581 } 3582 n++ 3583 _p_.syscalltick++ 3584 handoffp(_p_) 3585 } 3586 incidlelocked(1) 3587 } else if s == _Prunning { 3588 // Preempt G if it's running for too long. 3589 t := int64(_p_.schedtick) 3590 if int64(pd.schedtick) != t { 3591 pd.schedtick = uint32(t) 3592 pd.schedwhen = now 3593 continue 3594 } 3595 if pd.schedwhen+forcePreemptNS > now { 3596 continue 3597 } 3598 preemptone(_p_) 3599 } 3600 } 3601 return uint32(n) 3602 } 3603 3604 // Tell all goroutines that they have been preempted and they should stop. 3605 // This function is purely best-effort. It can fail to inform a goroutine if a 3606 // processor just started running it. 3607 // No locks need to be held. 3608 // Returns true if preemption request was issued to at least one goroutine. 3609 func preemptall() bool { 3610 res := false 3611 for i := int32(0); i < gomaxprocs; i++ { 3612 _p_ := allp[i] 3613 if _p_ == nil || _p_.status != _Prunning { 3614 continue 3615 } 3616 if preemptone(_p_) { 3617 res = true 3618 } 3619 } 3620 return res 3621 } 3622 3623 // Tell the goroutine running on processor P to stop. 3624 // This function is purely best-effort. It can incorrectly fail to inform the 3625 // goroutine. It can send inform the wrong goroutine. Even if it informs the 3626 // correct goroutine, that goroutine might ignore the request if it is 3627 // simultaneously executing newstack. 3628 // No lock needs to be held. 3629 // Returns true if preemption request was issued. 3630 // The actual preemption will happen at some point in the future 3631 // and will be indicated by the gp->status no longer being 3632 // Grunning 3633 func preemptone(_p_ *p) bool { 3634 mp := _p_.m.ptr() 3635 if mp == nil || mp == getg().m { 3636 return false 3637 } 3638 gp := mp.curg 3639 if gp == nil || gp == mp.g0 { 3640 return false 3641 } 3642 3643 gp.preempt = true 3644 3645 // Every call in a go routine checks for stack overflow by 3646 // comparing the current stack pointer to gp->stackguard0. 3647 // Setting gp->stackguard0 to StackPreempt folds 3648 // preemption into the normal stack overflow check. 3649 gp.stackguard0 = stackPreempt 3650 return true 3651 } 3652 3653 var starttime int64 3654 3655 func schedtrace(detailed bool) { 3656 now := nanotime() 3657 if starttime == 0 { 3658 starttime = now 3659 } 3660 3661 lock(&sched.lock) 3662 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 3663 if detailed { 3664 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 3665 } 3666 // We must be careful while reading data from P's, M's and G's. 3667 // Even if we hold schedlock, most data can be changed concurrently. 3668 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 3669 for i := int32(0); i < gomaxprocs; i++ { 3670 _p_ := allp[i] 3671 if _p_ == nil { 3672 continue 3673 } 3674 mp := _p_.m.ptr() 3675 h := atomic.Load(&_p_.runqhead) 3676 t := atomic.Load(&_p_.runqtail) 3677 if detailed { 3678 id := int32(-1) 3679 if mp != nil { 3680 id = mp.id 3681 } 3682 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 3683 } else { 3684 // In non-detailed mode format lengths of per-P run queues as: 3685 // [len1 len2 len3 len4] 3686 print(" ") 3687 if i == 0 { 3688 print("[") 3689 } 3690 print(t - h) 3691 if i == gomaxprocs-1 { 3692 print("]\n") 3693 } 3694 } 3695 } 3696 3697 if !detailed { 3698 unlock(&sched.lock) 3699 return 3700 } 3701 3702 for mp := allm; mp != nil; mp = mp.alllink { 3703 _p_ := mp.p.ptr() 3704 gp := mp.curg 3705 lockedg := mp.lockedg 3706 id1 := int32(-1) 3707 if _p_ != nil { 3708 id1 = _p_.id 3709 } 3710 id2 := int64(-1) 3711 if gp != nil { 3712 id2 = gp.goid 3713 } 3714 id3 := int64(-1) 3715 if lockedg != nil { 3716 id3 = lockedg.goid 3717 } 3718 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n") 3719 } 3720 3721 lock(&allglock) 3722 for gi := 0; gi < len(allgs); gi++ { 3723 gp := allgs[gi] 3724 mp := gp.m 3725 lockedm := gp.lockedm 3726 id1 := int32(-1) 3727 if mp != nil { 3728 id1 = mp.id 3729 } 3730 id2 := int32(-1) 3731 if lockedm != nil { 3732 id2 = lockedm.id 3733 } 3734 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 3735 } 3736 unlock(&allglock) 3737 unlock(&sched.lock) 3738 } 3739 3740 // Put mp on midle list. 3741 // Sched must be locked. 3742 // May run during STW, so write barriers are not allowed. 3743 //go:nowritebarrier 3744 func mput(mp *m) { 3745 mp.schedlink = sched.midle 3746 sched.midle.set(mp) 3747 sched.nmidle++ 3748 checkdead() 3749 } 3750 3751 // Try to get an m from midle list. 3752 // Sched must be locked. 3753 // May run during STW, so write barriers are not allowed. 3754 //go:nowritebarrier 3755 func mget() *m { 3756 mp := sched.midle.ptr() 3757 if mp != nil { 3758 sched.midle = mp.schedlink 3759 sched.nmidle-- 3760 } 3761 return mp 3762 } 3763 3764 // Put gp on the global runnable queue. 3765 // Sched must be locked. 3766 // May run during STW, so write barriers are not allowed. 3767 //go:nowritebarrier 3768 func globrunqput(gp *g) { 3769 gp.schedlink = 0 3770 if sched.runqtail != 0 { 3771 sched.runqtail.ptr().schedlink.set(gp) 3772 } else { 3773 sched.runqhead.set(gp) 3774 } 3775 sched.runqtail.set(gp) 3776 sched.runqsize++ 3777 } 3778 3779 // Put gp at the head of the global runnable queue. 3780 // Sched must be locked. 3781 // May run during STW, so write barriers are not allowed. 3782 //go:nowritebarrier 3783 func globrunqputhead(gp *g) { 3784 gp.schedlink = sched.runqhead 3785 sched.runqhead.set(gp) 3786 if sched.runqtail == 0 { 3787 sched.runqtail.set(gp) 3788 } 3789 sched.runqsize++ 3790 } 3791 3792 // Put a batch of runnable goroutines on the global runnable queue. 3793 // Sched must be locked. 3794 func globrunqputbatch(ghead *g, gtail *g, n int32) { 3795 gtail.schedlink = 0 3796 if sched.runqtail != 0 { 3797 sched.runqtail.ptr().schedlink.set(ghead) 3798 } else { 3799 sched.runqhead.set(ghead) 3800 } 3801 sched.runqtail.set(gtail) 3802 sched.runqsize += n 3803 } 3804 3805 // Try get a batch of G's from the global runnable queue. 3806 // Sched must be locked. 3807 func globrunqget(_p_ *p, max int32) *g { 3808 if sched.runqsize == 0 { 3809 return nil 3810 } 3811 3812 n := sched.runqsize/gomaxprocs + 1 3813 if n > sched.runqsize { 3814 n = sched.runqsize 3815 } 3816 if max > 0 && n > max { 3817 n = max 3818 } 3819 if n > int32(len(_p_.runq))/2 { 3820 n = int32(len(_p_.runq)) / 2 3821 } 3822 3823 sched.runqsize -= n 3824 if sched.runqsize == 0 { 3825 sched.runqtail = 0 3826 } 3827 3828 gp := sched.runqhead.ptr() 3829 sched.runqhead = gp.schedlink 3830 n-- 3831 for ; n > 0; n-- { 3832 gp1 := sched.runqhead.ptr() 3833 sched.runqhead = gp1.schedlink 3834 runqput(_p_, gp1, false) 3835 } 3836 return gp 3837 } 3838 3839 // Put p to on _Pidle list. 3840 // Sched must be locked. 3841 // May run during STW, so write barriers are not allowed. 3842 //go:nowritebarrier 3843 func pidleput(_p_ *p) { 3844 if !runqempty(_p_) { 3845 throw("pidleput: P has non-empty run queue") 3846 } 3847 _p_.link = sched.pidle 3848 sched.pidle.set(_p_) 3849 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 3850 } 3851 3852 // Try get a p from _Pidle list. 3853 // Sched must be locked. 3854 // May run during STW, so write barriers are not allowed. 3855 //go:nowritebarrier 3856 func pidleget() *p { 3857 _p_ := sched.pidle.ptr() 3858 if _p_ != nil { 3859 sched.pidle = _p_.link 3860 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 3861 } 3862 return _p_ 3863 } 3864 3865 // runqempty returns true if _p_ has no Gs on its local run queue. 3866 // Note that this test is generally racy. 3867 func runqempty(_p_ *p) bool { 3868 return _p_.runqhead == _p_.runqtail && _p_.runnext == 0 3869 } 3870 3871 // To shake out latent assumptions about scheduling order, 3872 // we introduce some randomness into scheduling decisions 3873 // when running with the race detector. 3874 // The need for this was made obvious by changing the 3875 // (deterministic) scheduling order in Go 1.5 and breaking 3876 // many poorly-written tests. 3877 // With the randomness here, as long as the tests pass 3878 // consistently with -race, they shouldn't have latent scheduling 3879 // assumptions. 3880 const randomizeScheduler = raceenabled 3881 3882 // runqput tries to put g on the local runnable queue. 3883 // If next if false, runqput adds g to the tail of the runnable queue. 3884 // If next is true, runqput puts g in the _p_.runnext slot. 3885 // If the run queue is full, runnext puts g on the global queue. 3886 // Executed only by the owner P. 3887 func runqput(_p_ *p, gp *g, next bool) { 3888 if randomizeScheduler && next && fastrand1()%2 == 0 { 3889 next = false 3890 } 3891 3892 if next { 3893 retryNext: 3894 oldnext := _p_.runnext 3895 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 3896 goto retryNext 3897 } 3898 if oldnext == 0 { 3899 return 3900 } 3901 // Kick the old runnext out to the regular run queue. 3902 gp = oldnext.ptr() 3903 } 3904 3905 retry: 3906 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 3907 t := _p_.runqtail 3908 if t-h < uint32(len(_p_.runq)) { 3909 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 3910 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 3911 return 3912 } 3913 if runqputslow(_p_, gp, h, t) { 3914 return 3915 } 3916 // the queue is not full, now the put above must suceed 3917 goto retry 3918 } 3919 3920 // Put g and a batch of work from local runnable queue on global queue. 3921 // Executed only by the owner P. 3922 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 3923 var batch [len(_p_.runq)/2 + 1]*g 3924 3925 // First, grab a batch from local queue. 3926 n := t - h 3927 n = n / 2 3928 if n != uint32(len(_p_.runq)/2) { 3929 throw("runqputslow: queue is not full") 3930 } 3931 for i := uint32(0); i < n; i++ { 3932 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 3933 } 3934 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 3935 return false 3936 } 3937 batch[n] = gp 3938 3939 if randomizeScheduler { 3940 for i := uint32(1); i <= n; i++ { 3941 j := fastrand1() % (i + 1) 3942 batch[i], batch[j] = batch[j], batch[i] 3943 } 3944 } 3945 3946 // Link the goroutines. 3947 for i := uint32(0); i < n; i++ { 3948 batch[i].schedlink.set(batch[i+1]) 3949 } 3950 3951 // Now put the batch on global queue. 3952 lock(&sched.lock) 3953 globrunqputbatch(batch[0], batch[n], int32(n+1)) 3954 unlock(&sched.lock) 3955 return true 3956 } 3957 3958 // Get g from local runnable queue. 3959 // If inheritTime is true, gp should inherit the remaining time in the 3960 // current time slice. Otherwise, it should start a new time slice. 3961 // Executed only by the owner P. 3962 func runqget(_p_ *p) (gp *g, inheritTime bool) { 3963 // If there's a runnext, it's the next G to run. 3964 for { 3965 next := _p_.runnext 3966 if next == 0 { 3967 break 3968 } 3969 if _p_.runnext.cas(next, 0) { 3970 return next.ptr(), true 3971 } 3972 } 3973 3974 for { 3975 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 3976 t := _p_.runqtail 3977 if t == h { 3978 return nil, false 3979 } 3980 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 3981 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 3982 return gp, false 3983 } 3984 } 3985 } 3986 3987 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 3988 // Batch is a ring buffer starting at batchHead. 3989 // Returns number of grabbed goroutines. 3990 // Can be executed by any P. 3991 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 3992 for { 3993 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 3994 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 3995 n := t - h 3996 n = n - n/2 3997 if n == 0 { 3998 if stealRunNextG { 3999 // Try to steal from _p_.runnext. 4000 if next := _p_.runnext; next != 0 { 4001 // Sleep to ensure that _p_ isn't about to run the g we 4002 // are about to steal. 4003 // The important use case here is when the g running on _p_ 4004 // ready()s another g and then almost immediately blocks. 4005 // Instead of stealing runnext in this window, back off 4006 // to give _p_ a chance to schedule runnext. This will avoid 4007 // thrashing gs between different Ps. 4008 usleep(100) 4009 if !_p_.runnext.cas(next, 0) { 4010 continue 4011 } 4012 batch[batchHead%uint32(len(batch))] = next 4013 return 1 4014 } 4015 } 4016 return 0 4017 } 4018 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4019 continue 4020 } 4021 for i := uint32(0); i < n; i++ { 4022 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4023 batch[(batchHead+i)%uint32(len(batch))] = g 4024 } 4025 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4026 return n 4027 } 4028 } 4029 } 4030 4031 // Steal half of elements from local runnable queue of p2 4032 // and put onto local runnable queue of p. 4033 // Returns one of the stolen elements (or nil if failed). 4034 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4035 t := _p_.runqtail 4036 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4037 if n == 0 { 4038 return nil 4039 } 4040 n-- 4041 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4042 if n == 0 { 4043 return gp 4044 } 4045 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4046 if t-h+n >= uint32(len(_p_.runq)) { 4047 throw("runqsteal: runq overflow") 4048 } 4049 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4050 return gp 4051 } 4052 4053 func testSchedLocalQueue() { 4054 _p_ := new(p) 4055 gs := make([]g, len(_p_.runq)) 4056 for i := 0; i < len(_p_.runq); i++ { 4057 if g, _ := runqget(_p_); g != nil { 4058 throw("runq is not empty initially") 4059 } 4060 for j := 0; j < i; j++ { 4061 runqput(_p_, &gs[i], false) 4062 } 4063 for j := 0; j < i; j++ { 4064 if g, _ := runqget(_p_); g != &gs[i] { 4065 print("bad element at iter ", i, "/", j, "\n") 4066 throw("bad element") 4067 } 4068 } 4069 if g, _ := runqget(_p_); g != nil { 4070 throw("runq is not empty afterwards") 4071 } 4072 } 4073 } 4074 4075 func testSchedLocalQueueSteal() { 4076 p1 := new(p) 4077 p2 := new(p) 4078 gs := make([]g, len(p1.runq)) 4079 for i := 0; i < len(p1.runq); i++ { 4080 for j := 0; j < i; j++ { 4081 gs[j].sig = 0 4082 runqput(p1, &gs[j], false) 4083 } 4084 gp := runqsteal(p2, p1, true) 4085 s := 0 4086 if gp != nil { 4087 s++ 4088 gp.sig++ 4089 } 4090 for { 4091 gp, _ = runqget(p2) 4092 if gp == nil { 4093 break 4094 } 4095 s++ 4096 gp.sig++ 4097 } 4098 for { 4099 gp, _ = runqget(p1) 4100 if gp == nil { 4101 break 4102 } 4103 gp.sig++ 4104 } 4105 for j := 0; j < i; j++ { 4106 if gs[j].sig != 1 { 4107 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n") 4108 throw("bad element") 4109 } 4110 } 4111 if s != i/2 && s != i/2+1 { 4112 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n") 4113 throw("bad steal") 4114 } 4115 } 4116 } 4117 4118 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4119 func setMaxThreads(in int) (out int) { 4120 lock(&sched.lock) 4121 out = int(sched.maxmcount) 4122 sched.maxmcount = int32(in) 4123 checkmcount() 4124 unlock(&sched.lock) 4125 return 4126 } 4127 4128 func haveexperiment(name string) bool { 4129 x := sys.Goexperiment 4130 for x != "" { 4131 xname := "" 4132 i := index(x, ",") 4133 if i < 0 { 4134 xname, x = x, "" 4135 } else { 4136 xname, x = x[:i], x[i+1:] 4137 } 4138 if xname == name { 4139 return true 4140 } 4141 } 4142 return false 4143 } 4144 4145 //go:nosplit 4146 func procPin() int { 4147 _g_ := getg() 4148 mp := _g_.m 4149 4150 mp.locks++ 4151 return int(mp.p.ptr().id) 4152 } 4153 4154 //go:nosplit 4155 func procUnpin() { 4156 _g_ := getg() 4157 _g_.m.locks-- 4158 } 4159 4160 //go:linkname sync_runtime_procPin sync.runtime_procPin 4161 //go:nosplit 4162 func sync_runtime_procPin() int { 4163 return procPin() 4164 } 4165 4166 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 4167 //go:nosplit 4168 func sync_runtime_procUnpin() { 4169 procUnpin() 4170 } 4171 4172 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 4173 //go:nosplit 4174 func sync_atomic_runtime_procPin() int { 4175 return procPin() 4176 } 4177 4178 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 4179 //go:nosplit 4180 func sync_atomic_runtime_procUnpin() { 4181 procUnpin() 4182 } 4183 4184 // Active spinning for sync.Mutex. 4185 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 4186 //go:nosplit 4187 func sync_runtime_canSpin(i int) bool { 4188 // sync.Mutex is cooperative, so we are conservative with spinning. 4189 // Spin only few times and only if running on a multicore machine and 4190 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 4191 // As opposed to runtime mutex we don't do passive spinning here, 4192 // because there can be work on global runq on on other Ps. 4193 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 4194 return false 4195 } 4196 if p := getg().m.p.ptr(); !runqempty(p) { 4197 return false 4198 } 4199 return true 4200 } 4201 4202 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 4203 //go:nosplit 4204 func sync_runtime_doSpin() { 4205 procyield(active_spin_cnt) 4206 }