github.com/epfl-dcsl/gotee@v0.0.0-20200909122901-014b35f5e5e9/src/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and we would need to park it.
//    Also, it would destroy locality of computation as we want to preserve
//    dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to global run queue as we are not
// sloppy about thread unparking when submitting to global queue. Also see comments
// for nmspinning manipulation.

var (
	m0           m
	g0           g
	raceprocctx0 uintptr

	//@aghosn for the enclave.
	mglobal *m = nil
)

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	if isEnclave {
		goto skipsysmon
	}
	systemstack(func() {
		newm(sysmon, nil)
	})
skipsysmon:
	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if (!isEnclave && g.m != &m0) || (isEnclave && g.m != mglobal) {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started. Must be after runtime_init
	// because nanotime on some platforms depends on startNano.
	runtimeInitTime = nanotime()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	if isEnclave {
		InitAllcg()
	}

	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}

	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	mcall(gosched_m)
}

// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
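//
// A minimal usage sketch (hypothetical waiter; l and gp are illustrative names,
// mirroring the pattern forcegchelper and ready use elsewhere in this file):
//
//	lock(&l)                                             // l guards the wait condition
//	goparkunlock(&l, "example wait", traceEvGoBlock, 1)  // park this G and release l
//	// ... later, another goroutine holding a reference gp to the waiter:
//	goready(gp, 1)                                       // mark gp runnable again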
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	mp := acquirem()
	gp := mp.curg
	if gp == nil {
		panic("gp is nil in gopark")
	}
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}

func goready1(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, false)
	})
}

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	s.id = -1
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.id != -1 {
		throw("runtime: sudog from pool released.")
	}
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
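		// The transfer below builds the batch as a singly linked list (first/last)
		// popped from the tail of the local cache, then splices it onto
		// sched.sudogcache in one step while holding sudoglock.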
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	sp := stringStructOf(&badmorestackg0Msg)
	write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	sp := stringStructOf(&badmorestackgsignalMsg)
	write(2, sp.str, int32(sp.len))
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()

	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000
	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m) // TODO(aghosn) apparently the stack is allocated here.
	alginit()       // maps must not be used before this call
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules

	if !isEnclave {
		msigsave(_g_.m)
		initSigmask = _g_.m.sigmask
	}

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if isEnclave {
		UnsafeAllocator.Initialize(Cooprt.StartUnsafe, Cooprt.SizeUnsafe)
		procs = 2 //TODO modify this for more threads in enclave.
		sched.lastpoll = ENCL_NPOLLING
		Cleanup_xstates()
	}

	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes. We can't do this until after
	// procresize because the write barrier needs a P.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
		for _, p := range allp {
			p.wbBuf.reset()
		}
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}

func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if mcount() > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)
	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	mp.id = sched.mnext
	sched.mnext++
	checkmcount()

	mp.fastrand[0] = 1597334677 * uint32(mp.id)
	if isEnclave {
		mp.fastrand[1] = uint32(1)
	} else {
		mp.fastrand[1] = uint32(cputicks())
	}
	if mp.fastrand[0]|mp.fastrand[1] == 0 {
		mp.fastrand[1] = 1
	}

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

func gcprocs() int32 {
	// Figure out how many CPUs to use during GC.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	if n > sched.nmidle+1 { // one M is currently running
		n = sched.nmidle + 1
	}
	unlock(&sched.lock)
	return n
}

func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if n > ncpu {
		n = ncpu
	}
	if n > _MaxGcproc {
		n = _MaxGcproc
	}
	n -= sched.nmidle + 1 // one M is currently running
	unlock(&sched.lock)
	return n > 0
}

func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing uint32

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	atomic.Store(&freezing, 1)
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gcscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gcscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gcscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		systemstack(func() {
			print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
			throw("casgstatus")
		})
	}

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
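	// Note: inside the enclave (isEnclave), the osyield fallback below is skipped,
	// so the loop keeps spinning with procyield instead of yielding the OS thread.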
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
		// But meanwhile just yield.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else if !isEnclave {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g, gcw *gcWork) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.

	gp.gcscandone = false

	// See http://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 10 * 1000
	var nextYield int64

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
loop:
	for i := 0; !gp.gcscandone; i++ {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true
			break loop

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					scanstack(gp, gcw)
					gp.gcscandone = true
				}
				restartg(gp)
				break loop
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running.
			// Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}

		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			procyield(10)
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(func() { startTheWorldWithSema(false) })
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	// sanity checks
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, p := range allp {
			if p.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
		}
	}
	if atomic.Load(&freezing) != 0 {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}
}

func mhelpgc() {
	_g_ := getg()
	_g_.m.helpgc = -1
}

func startTheWorldWithSema(emitTraceEvent bool) int64 {
	_g_ := getg()

	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if netpollinited() {
		gp := netpoll(false) // non-blocking
		injectglist(gp)
	}
	add := needaddgcproc()
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	startTime := nanotime()
	if emitTraceEvent {
		traceGCSTWDone()
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
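	// This is the same condition ready uses above: wake a P only when some P is
	// idle and no worker thread is already spinning.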
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate. This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return startTime
}

// Called to start an M.
//
// This must not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
func mstart() {
	_g_ := getg()

	osStack := _g_.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0

	mstart1(0)

	// Exit this thread.
	if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" {
		// Windows, Solaris and Plan 9 always system-allocate
		// the stack, but put it in _g_.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

func mstart1(dummy int32) {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record the caller for use as the top of stack in mcall and
	// for terminating the thread.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if (!isEnclave && _g_.m == &m0) || (isEnclave && _g_.m == mglobal) {
		mstartm0()
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		_g_.m.helpgc = 0
		stopm()
	} else if (!isEnclave && _g_.m != &m0) || (isEnclave && _g_.m != mglobal) {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	if iscgo && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}

// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	g := getg()
	m := g.m

	if (!isSimulation && m == &m0) || (isEnclave && m == mglobal) {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		notesleep(&m.park)
		throw("locked m0 woke up")
	}

	sigblock()
	unminit()

	// Free the gsignal stack.
	if m.gsignal != nil {
		stackfree(m.gsignal.stack)
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == m {
			*pprev = m.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	if !osStack {
		// Delay reaping m until it's done with the stack.
		//
		// If this is using an OS stack, the OS will free it
		// so there's no need for reaping.
		atomic.Store(&m.freeWait, 1)
		// Put m on the free list, though it will not be reaped until
		// freeWait is 0. Note that the free list must not be linked
		// through alllink because some functions walk allm without
		// locking, so may be using alllink.
		m.freelink = sched.freem
		sched.freem = m
	}
	unlock(&sched.lock)

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if osStack {
		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&m.freeWait)
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p := range allp {
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
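// Inside the enclave (isEnclave), the m is not heap-allocated: allocm claims a
// free TCS slot from Cooprt.Tcss and derives the m pointer from that slot's TLS
// block (see the body below).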
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows _p_.
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			if freem.freeWait != 0 {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			stackfree(freem.g0.stack)
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	var mp *m
	// Acquire the m from the Cooprt
	if isEnclave {
		//TODO use a lock
		id := -1
		var tcs *SgxTCSInfo
		for i := range Cooprt.Tcss {
			if Cooprt.Tcss[i].Used {
				continue
			}
			id = i
			tcs = &Cooprt.Tcss[i]
			tcs.Used = true
			break
		}
		//TODO unlock
		if tcs == nil {
			throw("Unable to acquire a tcs for the enclave")
		}
		//mp = tls - m_tls - 8
		addrp := tcs.Tls - 0x70 - 8
		mp = (*m)(unsafe.Pointer(addrp))
		mp.procid = uint64(id)
	} else {
		mp = new(m)
	}
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	extraMCount--
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	atomic.Xadd(&sched.ngsys, -1)
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	gp.gcscanvalid = true
	gp.gcscandone = true
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	atomic.Xadd(&sched.ngsys, +1)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	extraMCount++
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	atomic.Xadd(&sched.ngsys, +1)

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
1795 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1796 // It's important not to try to handle a signal between those two steps. 1797 sigmask := mp.sigmask 1798 sigblock() 1799 unminit() 1800 1801 mnext := lockextra(true) 1802 extraMCount++ 1803 mp.schedlink.set(mnext) 1804 1805 setg(nil) 1806 1807 // Commit the release of mp. 1808 unlockextra(mp) 1809 1810 msigrestore(sigmask) 1811 } 1812 1813 // A helper function for EnsureDropM. 1814 func getm() uintptr { 1815 return uintptr(unsafe.Pointer(getg().m)) 1816 } 1817 1818 var extram uintptr 1819 var extraMCount uint32 // Protected by lockextra 1820 var extraMWaiters uint32 1821 1822 // lockextra locks the extra list and returns the list head. 1823 // The caller must unlock the list by storing a new list head 1824 // to extram. If nilokay is true, then lockextra will 1825 // return a nil list head if that's what it finds. If nilokay is false, 1826 // lockextra will keep waiting until the list head is no longer nil. 1827 //go:nosplit 1828 func lockextra(nilokay bool) *m { 1829 const locked = 1 1830 1831 incr := false 1832 for { 1833 old := atomic.Loaduintptr(&extram) 1834 if old == locked { 1835 yield := osyield 1836 yield() 1837 continue 1838 } 1839 if old == 0 && !nilokay { 1840 if !incr { 1841 // Add 1 to the number of threads 1842 // waiting for an M. 1843 // This is cleared by newextram. 1844 atomic.Xadd(&extraMWaiters, 1) 1845 incr = true 1846 } 1847 usleep(1) 1848 continue 1849 } 1850 if atomic.Casuintptr(&extram, old, locked) { 1851 return (*m)(unsafe.Pointer(old)) 1852 } 1853 yield := osyield 1854 yield() 1855 continue 1856 } 1857 } 1858 1859 //go:nosplit 1860 func unlockextra(mp *m) { 1861 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1862 } 1863 1864 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 1865 // around exec'ing while creating/destroying threads. See issue #19546. 1866 var execLock rwmutex 1867 1868 // newmHandoff contains a list of m structures that need new OS threads. 1869 // This is used by newm in situations where newm itself can't safely 1870 // start an OS thread. 1871 var newmHandoff struct { 1872 lock mutex 1873 1874 // newm points to a list of M structures that need new OS 1875 // threads. The list is linked through m.schedlink. 1876 newm muintptr 1877 1878 // waiting indicates that wake needs to be notified when an m 1879 // is put on the list. 1880 waiting bool 1881 wake note 1882 1883 // haveTemplateThread indicates that the templateThread has 1884 // been started. This is not protected by lock. Use cas to set 1885 // to 1. 1886 haveTemplateThread uint32 1887 } 1888 1889 // Create a new m. It will start off with a call to fn, or else the scheduler. 1890 // fn needs to be static and not a heap allocated closure. 1891 // May run with m.p==nil, so write barriers are not allowed. 1892 //go:nowritebarrierrec 1893 func newm(fn func(), _p_ *p) { 1894 mp := allocm(_p_, fn) 1895 mp.nextp.set(_p_) 1896 mp.sigmask = initSigmask 1897 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { 1898 // We're on a locked M or a thread that may have been 1899 // started by C. The kernel state of this thread may 1900 // be strange (the user may have locked it for that 1901 // purpose). We don't want to clone that into another 1902 // thread. Instead, ask a known-good thread to create 1903 // the thread for us. 1904 // 1905 // This is disabled on Plan 9. See golang.org/issue/22227. 
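// A sketch of how this situation typically arises (the user code below is
// hypothetical):
//
//	runtime.LockOSThread()
//	tweakThreadStateViaCgo() // e.g. alter the thread's signal mask or priority
//	go doWork()              // may eventually require starting a new M
//
// If a new worker thread is needed while we are running on this locked (and
// possibly "dirty") M, cloning it directly would copy the altered kernel
// state, so the request is queued on newmHandoff for templateThread instead.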
1906 		//
1907 		// TODO: This may be unnecessary on Windows, which
1908 		// doesn't model thread creation off fork.
1909 		lock(&newmHandoff.lock)
1910 		if newmHandoff.haveTemplateThread == 0 {
1911 			throw("on a locked thread with no template thread")
1912 		}
1913 		mp.schedlink = newmHandoff.newm
1914 		newmHandoff.newm.set(mp)
1915 		if newmHandoff.waiting {
1916 			newmHandoff.waiting = false
1917 			notewakeup(&newmHandoff.wake)
1918 		}
1919 		unlock(&newmHandoff.lock)
1920 		return
1921 	}
1922 	newm1(mp)
1923 }
1924 
1925 func newm1(mp *m) {
1926 	if iscgo {
1927 		var ts cgothreadstart
1928 		if _cgo_thread_start == nil {
1929 			throw("_cgo_thread_start missing")
1930 		}
1931 		ts.g.set(mp.g0)
1932 		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
1933 		ts.fn = unsafe.Pointer(funcPC(mstart))
1934 		if msanenabled {
1935 			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
1936 		}
1937 		execLock.rlock() // Prevent process clone.
1938 		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
1939 		execLock.runlock()
1940 		return
1941 	}
1942 	execLock.rlock() // Prevent process clone.
1943 	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
1944 	execLock.runlock()
1945 }
1946 
1947 // startTemplateThread starts the template thread if it is not already
1948 // running.
1949 //
1950 // The calling thread must itself be in a known-good state.
1951 func startTemplateThread() {
1952 	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1953 		return
1954 	}
1955 	newm(templateThread, nil)
1956 }
1957 
1958 // templateThread is a thread in a known-good state that exists solely
1959 // to start new threads in known-good states when the calling thread
1960 // may not be in a good state.
1961 //
1962 // Many programs never need this, so templateThread is started lazily
1963 // when we first enter a state that might lead to running on a thread
1964 // in an unknown state.
1965 //
1966 // templateThread runs on an M without a P, so it must not have write
1967 // barriers.
1968 //
1969 //go:nowritebarrierrec
1970 func templateThread() {
1971 	lock(&sched.lock)
1972 	sched.nmsys++
1973 	checkdead()
1974 	unlock(&sched.lock)
1975 
1976 	for {
1977 		lock(&newmHandoff.lock)
1978 		for newmHandoff.newm != 0 {
1979 			newm := newmHandoff.newm.ptr()
1980 			newmHandoff.newm = 0
1981 			unlock(&newmHandoff.lock)
1982 			for newm != nil {
1983 				next := newm.schedlink.ptr()
1984 				newm.schedlink = 0
1985 				newm1(newm)
1986 				newm = next
1987 			}
1988 			lock(&newmHandoff.lock)
1989 		}
1990 		newmHandoff.waiting = true
1991 		noteclear(&newmHandoff.wake)
1992 		unlock(&newmHandoff.lock)
1993 		notesleep(&newmHandoff.wake)
1994 	}
1995 }
1996 
1997 // Stops execution of the current m until new work is available.
1998 // Returns with acquired P.
1999 func stopm() {
2000 	_g_ := getg()
2001 
2002 	if _g_.m.locks != 0 {
2003 		throw("stopm holding locks")
2004 	}
2005 	if _g_.m.p != 0 {
2006 		throw("stopm holding p")
2007 	}
2008 	if _g_.m.spinning {
2009 		throw("stopm spinning")
2010 	}
2011 
2012 retry:
2013 	lock(&sched.lock)
2014 	if cprtQ != nil {
2015 		run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
2016 		if run == 1 && sched.gcwaiting == 0 {
2017 			//throw("Should not be here !")
2018 			//We are the last and hence should not block, and there should be a p.
2019 			//Code inspired from startm.
2020 _p_ := pidleget() 2021 unlock(&sched.lock) 2022 if _p_ == nil { 2023 throw("Spinner for enclave unable to find a p.") 2024 } 2025 _g_.m.nextp.set(_p_) 2026 goto wakeup 2027 } else if run == 0 { 2028 throw("Apparently this can happen") 2029 } 2030 } 2031 mput(_g_.m) 2032 unlock(&sched.lock) 2033 notesleep(&_g_.m.park) 2034 noteclear(&_g_.m.park) 2035 if _g_.m.helpgc != 0 { 2036 // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P. 2037 gchelper() 2038 // Undo the effects of helpgc(). 2039 _g_.m.helpgc = 0 2040 _g_.m.mcache = nil 2041 _g_.m.p = 0 2042 goto retry 2043 } 2044 wakeup: 2045 acquirep(_g_.m.nextp.ptr()) 2046 _g_.m.nextp = 0 2047 } 2048 2049 func mspinning() { 2050 // startm's caller incremented nmspinning. Set the new M's spinning. 2051 getg().m.spinning = true 2052 } 2053 2054 // Schedules some M to run the p (creates an M if necessary). 2055 // If p==nil, tries to get an idle P, if no idle P's does nothing. 2056 // May run with m.p==nil, so write barriers are not allowed. 2057 // If spinning is set, the caller has incremented nmspinning and startm will 2058 // either decrement nmspinning or set m.spinning in the newly started M. 2059 //go:nowritebarrierrec 2060 func startm(_p_ *p, spinning bool) { 2061 lock(&sched.lock) 2062 if _p_ == nil { 2063 _p_ = pidleget() 2064 if _p_ == nil { 2065 unlock(&sched.lock) 2066 if spinning { 2067 // The caller incremented nmspinning, but there are no idle Ps, 2068 // so it's okay to just undo the increment and give up. 2069 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2070 throw("startm: negative nmspinning") 2071 } 2072 } 2073 return 2074 } 2075 } 2076 mp := mget() 2077 unlock(&sched.lock) 2078 if mp == nil { 2079 var fn func() 2080 if spinning { 2081 // The caller incremented nmspinning, so set m.spinning in the new M. 2082 fn = mspinning 2083 } 2084 newm(fn, _p_) 2085 return 2086 } 2087 if mp.spinning { 2088 throw("startm: m is spinning") 2089 } 2090 if mp.nextp != 0 { 2091 throw("startm: m has p") 2092 } 2093 if spinning && !runqempty(_p_) { 2094 throw("startm: p has runnable gs") 2095 } 2096 // The caller incremented nmspinning, so set m.spinning in the new M. 2097 mp.spinning = spinning 2098 mp.nextp.set(_p_) 2099 notewakeup(&mp.park) 2100 } 2101 2102 // Hands off P from syscall or locked M. 2103 // Always runs without a P, so write barriers are not allowed. 2104 //go:nowritebarrierrec 2105 func handoffp(_p_ *p) { 2106 // handoffp must start an M in any situation where 2107 // findrunnable would return a G to run on _p_. 
2108 2109 // if it has local work, start it straight away 2110 if !runqempty(_p_) || sched.runqsize != 0 { 2111 startm(_p_, false) 2112 return 2113 } 2114 // if it has GC work, start it straight away 2115 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2116 startm(_p_, false) 2117 return 2118 } 2119 // no local work, check that there are no spinning/idle M's, 2120 // otherwise our help is not required 2121 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 2122 startm(_p_, true) 2123 return 2124 } 2125 lock(&sched.lock) 2126 if sched.gcwaiting != 0 { 2127 _p_.status = _Pgcstop 2128 sched.stopwait-- 2129 if sched.stopwait == 0 { 2130 notewakeup(&sched.stopnote) 2131 } 2132 unlock(&sched.lock) 2133 return 2134 } 2135 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 2136 sched.safePointFn(_p_) 2137 sched.safePointWait-- 2138 if sched.safePointWait == 0 { 2139 notewakeup(&sched.safePointNote) 2140 } 2141 } 2142 if sched.runqsize != 0 { 2143 unlock(&sched.lock) 2144 startm(_p_, false) 2145 return 2146 } 2147 // If this is the last running P and nobody is polling network, 2148 // need to wakeup another M to poll network. 2149 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 2150 unlock(&sched.lock) 2151 startm(_p_, false) 2152 return 2153 } 2154 pidleput(_p_) 2155 unlock(&sched.lock) 2156 } 2157 2158 // Tries to add one more P to execute G's. 2159 // Called when a G is made runnable (newproc, ready). 2160 func wakep() { 2161 // be conservative about spinning threads 2162 if !atomic.Cas(&sched.nmspinning, 0, 1) { 2163 return 2164 } 2165 startm(nil, true) 2166 } 2167 2168 // Stops execution of the current m that is locked to a g until the g is runnable again. 2169 // Returns with acquired P. 2170 func stoplockedm() { 2171 _g_ := getg() 2172 2173 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 2174 throw("stoplockedm: inconsistent locking") 2175 } 2176 if _g_.m.p != 0 { 2177 // Schedule another M to run this p. 2178 _p_ := releasep() 2179 handoffp(_p_) 2180 } 2181 incidlelocked(1) 2182 // Wait until another thread schedules lockedg again. 2183 notesleep(&_g_.m.park) 2184 noteclear(&_g_.m.park) 2185 status := readgstatus(_g_.m.lockedg.ptr()) 2186 if status&^_Gscan != _Grunnable { 2187 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 2188 dumpgstatus(_g_) 2189 throw("stoplockedm: not runnable") 2190 } 2191 acquirep(_g_.m.nextp.ptr()) 2192 _g_.m.nextp = 0 2193 } 2194 2195 // Schedules the locked m to run the locked gp. 2196 // May run during STW, so write barriers are not allowed. 2197 //go:nowritebarrierrec 2198 func startlockedm(gp *g) { 2199 _g_ := getg() 2200 2201 mp := gp.lockedm.ptr() 2202 if mp == _g_.m { 2203 throw("startlockedm: locked to me") 2204 } 2205 if mp.nextp != 0 { 2206 throw("startlockedm: m has p") 2207 } 2208 // directly handoff current P to the locked m 2209 incidlelocked(-1) 2210 _p_ := releasep() 2211 mp.nextp.set(_p_) 2212 notewakeup(&mp.park) 2213 stopm() 2214 } 2215 2216 // Stops the current m for stopTheWorld. 2217 // Returns when the world is restarted. 2218 func gcstopm() { 2219 _g_ := getg() 2220 2221 if sched.gcwaiting == 0 { 2222 throw("gcstopm: not waiting for gc") 2223 } 2224 2225 if _g_.m.spinning { 2226 _g_.m.spinning = false 2227 // OK to just drop nmspinning here, 2228 // startTheWorld will unpark threads as necessary. 
2229 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2230 throw("gcstopm: negative nmspinning") 2231 } 2232 } 2233 _p_ := releasep() 2234 lock(&sched.lock) 2235 _p_.status = _Pgcstop 2236 sched.stopwait-- 2237 if sched.stopwait == 0 { 2238 notewakeup(&sched.stopnote) 2239 } 2240 unlock(&sched.lock) 2241 stopm() 2242 } 2243 2244 // Schedules gp to run on the current M. 2245 // If inheritTime is true, gp inherits the remaining time in the 2246 // current time slice. Otherwise, it starts a new time slice. 2247 // Never returns. 2248 // 2249 // Write barriers are allowed because this is called immediately after 2250 // acquiring a P in several places. 2251 // 2252 //go:yeswritebarrierrec 2253 func execute(gp *g, inheritTime bool) { 2254 _g_ := getg() 2255 2256 casgstatus(gp, _Grunnable, _Grunning) 2257 gp.waitsince = 0 2258 gp.preempt = false 2259 gp.stackguard0 = gp.stack.lo + _StackGuard 2260 if !inheritTime { 2261 _g_.m.p.ptr().schedtick++ 2262 } 2263 _g_.m.curg = gp 2264 gp.m = _g_.m 2265 2266 // Check whether the profiler needs to be turned on or off. 2267 hz := sched.profilehz 2268 if _g_.m.profilehz != hz { 2269 setThreadCPUProfiler(hz) 2270 } 2271 2272 if trace.enabled { 2273 // GoSysExit has to happen when we have a P, but before GoStart. 2274 // So we emit it here. 2275 if gp.syscallsp != 0 && gp.sysblocktraced { 2276 traceGoSysExit(gp.sysexitticks) 2277 } 2278 traceGoStart() 2279 } 2280 2281 gogo(&gp.sched) 2282 } 2283 2284 // Finds a runnable goroutine to execute. 2285 // Tries to steal from other P's, get g from global queue, poll network. 2286 func findrunnable() (gp *g, inheritTime bool) { 2287 _g_ := getg() 2288 2289 // The conditions here and in handoffp must agree: if 2290 // findrunnable would return a G to run, handoffp must start 2291 // an M. 2292 2293 top: 2294 _p_ := _g_.m.p.ptr() 2295 if sched.gcwaiting != 0 { 2296 gcstopm() 2297 goto top 2298 } 2299 if _p_.runSafePointFn != 0 { 2300 runSafePointFn() 2301 } 2302 if fingwait && fingwake { 2303 if gp := wakefing(); gp != nil { 2304 ready(gp, 0, true) 2305 } 2306 } 2307 if *cgo_yield != nil { 2308 asmcgocall(*cgo_yield, nil) 2309 } 2310 2311 if cprtQ != nil && cprtQ.size > 0 { 2312 migrateCrossDomain(false) 2313 } 2314 2315 // local runq 2316 if gp, inheritTime := runqget(_p_); gp != nil { 2317 return gp, inheritTime 2318 } 2319 2320 // global runq 2321 if sched.runqsize != 0 { 2322 lock(&sched.lock) 2323 gp := globrunqget(_p_, 0) 2324 unlock(&sched.lock) 2325 if gp != nil { 2326 return gp, false 2327 } 2328 } 2329 2330 // Poll network. 2331 // This netpoll is only an optimization before we resort to stealing. 2332 // We can safely skip it if there are no waiters or a thread is blocked 2333 // in netpoll already. If there is any kind of logical race with that 2334 // blocked thread (e.g. it has already returned from netpoll, but does 2335 // not set lastpoll yet), this thread will do blocking netpoll below 2336 // anyway. 2337 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && 2338 ((!isEnclave && atomic.Load64(&sched.lastpoll) != 0) || 2339 (isEnclave && atomic.Xchg64(&sched.lastpoll, ENCL_POLLING) == ENCL_NPOLLING)) { 2340 if gp := netpoll(false); gp != nil { // non-blocking 2341 // netpoll returns list of goroutines linked by schedlink. 
2342 injectglist(gp.schedlink.ptr()) 2343 casgstatus(gp, _Gwaiting, _Grunnable) 2344 if trace.enabled { 2345 traceGoUnpark(gp, 0) 2346 } 2347 if isEnclave { 2348 atomic.Store64(&sched.lastpoll, ENCL_NPOLLING) 2349 } 2350 return gp, false 2351 } 2352 if isEnclave { 2353 atomic.Store64(&sched.lastpoll, ENCL_NPOLLING) 2354 } 2355 } 2356 2357 // Steal work from other P's. 2358 procs := uint32(gomaxprocs) 2359 if atomic.Load(&sched.npidle) == procs-1 { 2360 // Either GOMAXPROCS=1 or everybody, except for us, is idle already. 2361 // New work can appear from returning syscall/cgocall, network or timers. 2362 // Neither of that submits to local run queues, so no point in stealing. 2363 goto stop 2364 } 2365 2366 // If number of spinning M's >= number of busy P's, block. 2367 // This is necessary to prevent excessive CPU consumption 2368 // when GOMAXPROCS>>1 but the program parallelism is low. 2369 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { 2370 goto stop 2371 } 2372 if !_g_.m.spinning { 2373 _g_.m.spinning = true 2374 atomic.Xadd(&sched.nmspinning, 1) 2375 } 2376 for i := 0; i < 4; i++ { 2377 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { 2378 if sched.gcwaiting != 0 { 2379 goto top 2380 } 2381 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 2382 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil { 2383 return gp, false 2384 } 2385 } 2386 } 2387 2388 stop: 2389 2390 if isEnclave { 2391 //We only have one thread so fuck that, go back to beginning 2392 //TODO @aghosn move that somewhere else down there. 2393 goto top 2394 } 2395 2396 // We have nothing to do. If we're in the GC mark phase, can 2397 // safely scan and blacken objects, and have work to do, run 2398 // idle-time marking rather than give up the P. 2399 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 2400 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2401 gp := _p_.gcBgMarkWorker.ptr() 2402 casgstatus(gp, _Gwaiting, _Grunnable) 2403 if trace.enabled { 2404 traceGoUnpark(gp, 0) 2405 } 2406 return gp, false 2407 } 2408 2409 // Before we drop our P, make a snapshot of the allp slice, 2410 // which can change underfoot once we no longer block 2411 // safe-points. We don't need to snapshot the contents because 2412 // everything up to cap(allp) is immutable. 2413 allpSnapshot := allp 2414 2415 // return P and block 2416 lock(&sched.lock) 2417 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2418 unlock(&sched.lock) 2419 goto top 2420 } 2421 if sched.runqsize != 0 { 2422 gp := globrunqget(_p_, 0) 2423 unlock(&sched.lock) 2424 return gp, false 2425 } 2426 2427 if cprtQ != nil { 2428 rcount := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 2429 if rcount == 1 && sched.gcwaiting == 0 { 2430 //we are the last and hence should not block. 2431 //we still have our p. 2432 unlock(&sched.lock) 2433 goto top 2434 } 2435 } 2436 2437 if releasep() != _p_ { 2438 throw("findrunnable: wrong p") 2439 } 2440 pidleput(_p_) 2441 unlock(&sched.lock) 2442 2443 // Delicate dance: thread transitions from spinning to non-spinning state, 2444 // potentially concurrently with submission of new goroutines. We must 2445 // drop nmspinning first and then check all per-P queues again (with 2446 // #StoreLoad memory barrier in between). 
If we do it the other way around, 2447 // another thread can submit a goroutine after we've checked all run queues 2448 // but before we drop nmspinning; as the result nobody will unpark a thread 2449 // to run the goroutine. 2450 // If we discover new work below, we need to restore m.spinning as a signal 2451 // for resetspinning to unpark a new worker thread (because there can be more 2452 // than one starving goroutine). However, if after discovering new work 2453 // we also observe no idle Ps, it is OK to just park the current thread: 2454 // the system is fully loaded so no spinning threads are required. 2455 // Also see "Worker thread parking/unparking" comment at the top of the file. 2456 wasSpinning := _g_.m.spinning 2457 if _g_.m.spinning { 2458 _g_.m.spinning = false 2459 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2460 throw("findrunnable: negative nmspinning") 2461 } 2462 } 2463 2464 // check all runqueues once again 2465 for _, _p_ := range allpSnapshot { 2466 if !runqempty(_p_) { 2467 lock(&sched.lock) 2468 _p_ = pidleget() 2469 unlock(&sched.lock) 2470 if _p_ != nil { 2471 acquirep(_p_) 2472 if wasSpinning { 2473 _g_.m.spinning = true 2474 atomic.Xadd(&sched.nmspinning, 1) 2475 } 2476 goto top 2477 } 2478 break 2479 } 2480 } 2481 2482 // Check for idle-priority GC work again. 2483 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2484 lock(&sched.lock) 2485 _p_ = pidleget() 2486 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2487 pidleput(_p_) 2488 _p_ = nil 2489 } 2490 unlock(&sched.lock) 2491 if _p_ != nil { 2492 acquirep(_p_) 2493 if wasSpinning { 2494 _g_.m.spinning = true 2495 atomic.Xadd(&sched.nmspinning, 1) 2496 } 2497 // Go back to idle GC check. 2498 goto stop 2499 } 2500 } 2501 2502 // poll network 2503 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2504 if _g_.m.p != 0 { 2505 throw("findrunnable: netpoll with p") 2506 } 2507 if _g_.m.spinning { 2508 throw("findrunnable: netpoll with spinning") 2509 } 2510 gp := netpoll(true) // block until new work is available 2511 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2512 if gp != nil { 2513 lock(&sched.lock) 2514 _p_ = pidleget() 2515 unlock(&sched.lock) 2516 if _p_ != nil { 2517 acquirep(_p_) 2518 injectglist(gp.schedlink.ptr()) 2519 casgstatus(gp, _Gwaiting, _Grunnable) 2520 if trace.enabled { 2521 traceGoUnpark(gp, 0) 2522 } 2523 return gp, false 2524 } 2525 injectglist(gp) 2526 } 2527 } 2528 2529 stopm() 2530 goto top 2531 } 2532 2533 // pollWork returns true if there is non-background work this P could 2534 // be doing. This is a fairly lightweight check to be used for 2535 // background work loops, like idle GC. It checks a subset of the 2536 // conditions checked by the actual scheduler. 
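// For orientation, an idle-priority background loop is expected to use it
// roughly like this (a sketch, not the actual GC code):
//
//	for haveBackgroundWork() {
//		doALittleWork()
//		if pollWork() {
//			break // the P has better things to do
//		}
//	}
//
// The idle GC mark worker's drain loop performs this kind of check so that it
// gives up the P as soon as runnable goroutines or other work appear.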
2537 func pollWork() bool { 2538 if sched.runqsize != 0 { 2539 return true 2540 } 2541 p := getg().m.p.ptr() 2542 if !runqempty(p) { 2543 return true 2544 } 2545 if cprtQ != nil && cprtQ.size > 0 { 2546 return true 2547 } 2548 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2549 if gp := netpoll(false); gp != nil { 2550 injectglist(gp) 2551 return true 2552 } 2553 } 2554 return false 2555 } 2556 2557 func resetspinning() { 2558 _g_ := getg() 2559 if !_g_.m.spinning { 2560 throw("resetspinning: not a spinning m") 2561 } 2562 _g_.m.spinning = false 2563 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2564 if int32(nmspinning) < 0 { 2565 throw("findrunnable: negative nmspinning") 2566 } 2567 // M wakeup policy is deliberately somewhat conservative, so check if we 2568 // need to wakeup another P here. See "Worker thread parking/unparking" 2569 // comment at the top of the file for details. 2570 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2571 wakep() 2572 } 2573 } 2574 2575 // Injects the list of runnable G's into the scheduler. 2576 // Can run concurrently with GC. 2577 func injectglist(glist *g) { 2578 if glist == nil { 2579 return 2580 } 2581 if trace.enabled { 2582 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2583 traceGoUnpark(gp, 0) 2584 } 2585 } 2586 lock(&sched.lock) 2587 var n int 2588 for n = 0; glist != nil; n++ { 2589 gp := glist 2590 glist = gp.schedlink.ptr() 2591 casgstatus(gp, _Gwaiting, _Grunnable) 2592 globrunqput(gp) 2593 } 2594 unlock(&sched.lock) 2595 for ; n != 0 && sched.npidle != 0; n-- { 2596 startm(nil, false) 2597 } 2598 } 2599 2600 // One round of scheduler: find a runnable goroutine and execute it. 2601 // Never returns. 2602 func schedule() { 2603 _g_ := getg() 2604 2605 if _g_.m.locks != 0 { 2606 throw("schedule: holding locks") 2607 } 2608 2609 if _g_.m.lockedg != 0 { 2610 stoplockedm() 2611 execute(_g_.m.lockedg.ptr(), false) // Never returns. 2612 } 2613 2614 // We should not schedule away from a g that is executing a cgo call, 2615 // since the cgo call is using the m's g0 stack. 2616 if _g_.m.incgo { 2617 throw("schedule: in cgo") 2618 } 2619 2620 top: 2621 if sched.gcwaiting != 0 { 2622 gcstopm() 2623 goto top 2624 } 2625 if _g_.m.p.ptr().runSafePointFn != 0 { 2626 runSafePointFn() 2627 } 2628 2629 var gp *g 2630 var inheritTime bool 2631 if trace.enabled || trace.shutdown { 2632 gp = traceReader() 2633 if gp != nil { 2634 casgstatus(gp, _Gwaiting, _Grunnable) 2635 traceGoUnpark(gp, 0) 2636 } 2637 } 2638 2639 if gp == nil && gcBlackenEnabled != 0 { 2640 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2641 } 2642 2643 if gp == nil && cprtQ != nil { 2644 if _g_.m.p.ptr().schedtick%5 == 0 && cprtQ.size > 0 { 2645 migrateCrossDomain(false) 2646 } 2647 } 2648 2649 if gp == nil { 2650 // Check the global runnable queue once in a while to ensure fairness. 2651 // Otherwise two goroutines can completely occupy the local runqueue 2652 // by constantly respawning each other. 
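// An illustrative shape of that pathological pattern:
//
//	var ping, pong func()
//	ping = func() { go pong() }
//	pong = func() { go ping() }
//
// Each spawn lands in the P's local run queue (runqput with runnext set), so
// without the periodic check below a goroutine parked in the global queue
// could wait arbitrarily long; sampling the global queue every 61 schedule
// ticks bounds that wait.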
2653 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2654 lock(&sched.lock) 2655 gp = globrunqget(_g_.m.p.ptr(), 1) 2656 unlock(&sched.lock) 2657 } 2658 } 2659 if gp == nil { 2660 gp, inheritTime = runqget(_g_.m.p.ptr()) 2661 if gp != nil && _g_.m.spinning { 2662 throw("schedule: spinning with local work") 2663 } 2664 } 2665 if gp == nil { 2666 gp, inheritTime = findrunnable() // blocks until work is available 2667 } 2668 2669 // This thread is going to run a goroutine and is not spinning anymore, 2670 // so if it was marked as spinning we need to reset it now and potentially 2671 // start a new spinning M. 2672 if _g_.m.spinning { 2673 resetspinning() 2674 } 2675 2676 if gp.lockedm != 0 { 2677 // Hands off own p to the locked m, 2678 // then blocks waiting for a new p. 2679 startlockedm(gp) 2680 goto top 2681 } 2682 2683 execute(gp, inheritTime) 2684 } 2685 2686 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2687 // Typically a caller sets gp's status away from Grunning and then 2688 // immediately calls dropg to finish the job. The caller is also responsible 2689 // for arranging that gp will be restarted using ready at an 2690 // appropriate time. After calling dropg and arranging for gp to be 2691 // readied later, the caller can do other work but eventually should 2692 // call schedule to restart the scheduling of goroutines on this m. 2693 func dropg() { 2694 _g_ := getg() 2695 2696 setMNoWB(&_g_.m.curg.m, nil) 2697 setGNoWB(&_g_.m.curg, nil) 2698 } 2699 2700 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2701 unlock((*mutex)(lock)) 2702 return true 2703 } 2704 2705 // park continuation on g0. 2706 func park_m(gp *g) { 2707 _g_ := getg() 2708 2709 if trace.enabled { 2710 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2711 } 2712 2713 casgstatus(gp, _Grunning, _Gwaiting) 2714 dropg() 2715 2716 if _g_.m.waitunlockf != nil { 2717 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2718 ok := fn(gp, _g_.m.waitlock) 2719 _g_.m.waitunlockf = nil 2720 _g_.m.waitlock = nil 2721 if !ok { 2722 if trace.enabled { 2723 traceGoUnpark(gp, 2) 2724 } 2725 casgstatus(gp, _Gwaiting, _Grunnable) 2726 execute(gp, true) // Schedule it back, never returns. 2727 } 2728 } 2729 schedule() 2730 } 2731 2732 func goschedImpl(gp *g) { 2733 status := readgstatus(gp) 2734 if status&^_Gscan != _Grunning { 2735 dumpgstatus(gp) 2736 throw("bad g status") 2737 } 2738 casgstatus(gp, _Grunning, _Grunnable) 2739 dropg() 2740 lock(&sched.lock) 2741 globrunqput(gp) 2742 unlock(&sched.lock) 2743 2744 schedule() 2745 } 2746 2747 // Gosched continuation on g0. 2748 func gosched_m(gp *g) { 2749 if trace.enabled { 2750 traceGoSched() 2751 } 2752 goschedImpl(gp) 2753 } 2754 2755 // goschedguarded is a forbidden-states-avoided version of gosched_m 2756 func goschedguarded_m(gp *g) { 2757 2758 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning { 2759 gogo(&gp.sched) // never return 2760 } 2761 2762 if trace.enabled { 2763 traceGoSched() 2764 } 2765 goschedImpl(gp) 2766 } 2767 2768 func gopreempt_m(gp *g) { 2769 if trace.enabled { 2770 traceGoPreempt() 2771 } 2772 goschedImpl(gp) 2773 } 2774 2775 // Finishes execution of the current goroutine. 2776 func goexit1() { 2777 if raceenabled { 2778 racegoend() 2779 } 2780 if trace.enabled { 2781 traceGoEnd() 2782 } 2783 mcall(goexit0) 2784 } 2785 2786 // goexit continuation on g0. 
2787 func goexit0(gp *g) { 2788 _g_ := getg() 2789 2790 casgstatus(gp, _Grunning, _Gdead) 2791 if isSystemGoroutine(gp) { 2792 atomic.Xadd(&sched.ngsys, -1) 2793 } 2794 2795 gp.m = nil 2796 locked := gp.lockedm != 0 2797 gp.lockedm = 0 2798 _g_.m.lockedg = 0 2799 gp.paniconfault = false 2800 gp._defer = nil // should be true already but just in case. 2801 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2802 gp.writebuf = nil 2803 gp.waitreason = "" 2804 gp.param = nil 2805 gp.labels = nil 2806 gp.timer = nil 2807 2808 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { 2809 // Flush assist credit to the global pool. This gives 2810 // better information to pacing if the application is 2811 // rapidly creating an exiting goroutines. 2812 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes)) 2813 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit) 2814 gp.gcAssistBytes = 0 2815 } 2816 2817 // Note that gp's stack scan is now "valid" because it has no 2818 // stack. 2819 gp.gcscanvalid = true 2820 dropg() 2821 2822 if _g_.m.lockedInt != 0 { 2823 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") 2824 throw("internal lockOSThread error") 2825 } 2826 _g_.m.lockedExt = 0 2827 gfput(_g_.m.p.ptr(), gp) 2828 if locked { 2829 // The goroutine may have locked this thread because 2830 // it put it in an unusual kernel state. Kill it 2831 // rather than returning it to the thread pool. 2832 2833 // Return to mstart, which will release the P and exit 2834 // the thread. 2835 if GOOS != "plan9" { // See golang.org/issue/22227. 2836 gogo(&_g_.m.g0.sched) 2837 } 2838 } 2839 schedule() 2840 } 2841 2842 // save updates getg().sched to refer to pc and sp so that a following 2843 // gogo will restore pc and sp. 2844 // 2845 // save must not have write barriers because invoking a write barrier 2846 // can clobber getg().sched. 2847 // 2848 //go:nosplit 2849 //go:nowritebarrierrec 2850 func save(pc, sp uintptr) { 2851 _g_ := getg() 2852 2853 _g_.sched.pc = pc 2854 _g_.sched.sp = sp 2855 _g_.sched.lr = 0 2856 _g_.sched.ret = 0 2857 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2858 // We need to ensure ctxt is zero, but can't have a write 2859 // barrier here. However, it should always already be zero. 2860 // Assert that. 2861 if _g_.sched.ctxt != nil { 2862 badctxt() 2863 } 2864 } 2865 2866 // The goroutine g is about to enter a system call. 2867 // Record that it's not using the cpu anymore. 2868 // This is called only from the go syscall library and cgocall, 2869 // not from the low-level system calls used by the runtime. 2870 // 2871 // Entersyscall cannot split the stack: the gosave must 2872 // make g->sched refer to the caller's stack segment, because 2873 // entersyscall is going to return immediately after. 2874 // 2875 // Nothing entersyscall calls can split the stack either. 2876 // We cannot safely move the stack during an active call to syscall, 2877 // because we do not know which of the uintptr arguments are 2878 // really pointers (back into the stack). 2879 // In practice, this means that we make the fast path run through 2880 // entersyscall doing no-split things, and the slow path has to use systemstack 2881 // to run bigger things on the system stack. 2882 // 2883 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2884 // saved SP and PC are restored. 
This is needed when exitsyscall will be called 2885 // from a function further up in the call stack than the parent, as g->syscallsp 2886 // must always point to a valid stack frame. entersyscall below is the normal 2887 // entry point for syscalls, which obtains the SP and PC from the caller. 2888 // 2889 // Syscall tracing: 2890 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2891 // If the syscall does not block, that is it, we do not emit any other events. 2892 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2893 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2894 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2895 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2896 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2897 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2898 // and we wait for the increment before emitting traceGoSysExit. 2899 // Note that the increment is done even if tracing is not enabled, 2900 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2901 // 2902 //go:nosplit 2903 func reentersyscall(pc, sp uintptr) { 2904 _g_ := getg() 2905 2906 // Disable preemption because during this function g is in Gsyscall status, 2907 // but can have inconsistent g->sched, do not let GC observe it. 2908 _g_.m.locks++ 2909 2910 // Entersyscall must not call any function that might split/grow the stack. 2911 // (See details in comment above.) 2912 // Catch calls that might, by replacing the stack guard with something that 2913 // will trip any stack check and leaving a flag to tell newstack to die. 2914 _g_.stackguard0 = stackPreempt 2915 _g_.throwsplit = true 2916 2917 // Leave SP around for GC and traceback. 2918 save(pc, sp) 2919 _g_.syscallsp = sp 2920 _g_.syscallpc = pc 2921 casgstatus(_g_, _Grunning, _Gsyscall) 2922 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2923 systemstack(func() { 2924 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2925 throw("entersyscall") 2926 }) 2927 } 2928 2929 if trace.enabled { 2930 systemstack(traceGoSysCall) 2931 // systemstack itself clobbers g.sched.{pc,sp} and we might 2932 // need them later when the G is genuinely blocked in a 2933 // syscall 2934 save(pc, sp) 2935 } 2936 2937 if atomic.Load(&sched.sysmonwait) != 0 { 2938 systemstack(entersyscall_sysmon) 2939 save(pc, sp) 2940 } 2941 2942 if _g_.m.p.ptr().runSafePointFn != 0 { 2943 // runSafePointFn may stack split if run on this stack 2944 systemstack(runSafePointFn) 2945 save(pc, sp) 2946 } 2947 2948 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2949 _g_.sysblocktraced = true 2950 _g_.m.mcache = nil 2951 _g_.m.p.ptr().m = 0 2952 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2953 if sched.gcwaiting != 0 { 2954 systemstack(entersyscall_gcwait) 2955 save(pc, sp) 2956 } 2957 2958 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2959 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2960 // Morestack detects this case and throws. 2961 _g_.stackguard0 = stackPreempt 2962 _g_.m.locks-- 2963 } 2964 2965 // Standard syscall entry used by the go syscall library and normal cgo calls. 
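// Schematically, the wrappers in the syscall package bracket the raw kernel
// trap like this (simplified sketch; rawSyscall stands in for the assembly
// that actually issues the trap):
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//		entersyscall(0)
//		r1, r2, err = rawSyscall(trap, a1, a2, a3)
//		exitsyscall(0)
//		return
//	}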
2966 //go:nosplit 2967 func entersyscall(dummy int32) { 2968 reentersyscall(getcallerpc(), getcallersp(unsafe.Pointer(&dummy))) 2969 } 2970 2971 func entersyscall_sysmon() { 2972 lock(&sched.lock) 2973 if atomic.Load(&sched.sysmonwait) != 0 { 2974 atomic.Store(&sched.sysmonwait, 0) 2975 notewakeup(&sched.sysmonnote) 2976 } 2977 unlock(&sched.lock) 2978 } 2979 2980 func entersyscall_gcwait() { 2981 _g_ := getg() 2982 _p_ := _g_.m.p.ptr() 2983 2984 lock(&sched.lock) 2985 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2986 if trace.enabled { 2987 traceGoSysBlock(_p_) 2988 traceProcStop(_p_) 2989 } 2990 _p_.syscalltick++ 2991 if sched.stopwait--; sched.stopwait == 0 { 2992 notewakeup(&sched.stopnote) 2993 } 2994 } 2995 unlock(&sched.lock) 2996 } 2997 2998 // The same as entersyscall(), but with a hint that the syscall is blocking. 2999 //go:nosplit 3000 func entersyscallblock(dummy int32) { 3001 _g_ := getg() 3002 3003 _g_.m.locks++ // see comment in entersyscall 3004 _g_.throwsplit = true 3005 _g_.stackguard0 = stackPreempt // see comment in entersyscall 3006 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3007 _g_.sysblocktraced = true 3008 _g_.m.p.ptr().syscalltick++ 3009 3010 // Leave SP around for GC and traceback. 3011 pc := getcallerpc() 3012 sp := getcallersp(unsafe.Pointer(&dummy)) 3013 save(pc, sp) 3014 _g_.syscallsp = _g_.sched.sp 3015 _g_.syscallpc = _g_.sched.pc 3016 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3017 sp1 := sp 3018 sp2 := _g_.sched.sp 3019 sp3 := _g_.syscallsp 3020 systemstack(func() { 3021 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3022 throw("entersyscallblock") 3023 }) 3024 } 3025 casgstatus(_g_, _Grunning, _Gsyscall) 3026 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3027 systemstack(func() { 3028 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3029 throw("entersyscallblock") 3030 }) 3031 } 3032 3033 systemstack(entersyscallblock_handoff) 3034 3035 // Resave for traceback during blocked call. 3036 save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy))) 3037 3038 _g_.m.locks-- 3039 } 3040 3041 func entersyscallblock_handoff() { 3042 if trace.enabled { 3043 traceGoSysCall() 3044 traceGoSysBlock(getg().m.p.ptr()) 3045 } 3046 handoffp(releasep()) 3047 } 3048 3049 // The goroutine g exited its system call. 3050 // Arrange for it to run on a cpu again. 3051 // This is called only from the go syscall library, not 3052 // from the low-level system calls used by the runtime. 3053 // 3054 // Write barriers are not allowed because our P may have been stolen. 3055 // 3056 //go:nosplit 3057 //go:nowritebarrierrec 3058 func exitsyscall(dummy int32) { 3059 _g_ := getg() 3060 3061 _g_.m.locks++ // see comment in entersyscall 3062 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 3063 // throw calls print which may try to grow the stack, 3064 // but throwsplit == true so the stack can not be grown; 3065 // use systemstack to avoid that possible problem. 
3066 systemstack(func() { 3067 throw("exitsyscall: syscall frame is no longer valid") 3068 }) 3069 } 3070 3071 _g_.waitsince = 0 3072 oldp := _g_.m.p.ptr() 3073 if exitsyscallfast() { 3074 if _g_.m.mcache == nil { 3075 systemstack(func() { 3076 throw("lost mcache") 3077 }) 3078 } 3079 if trace.enabled { 3080 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3081 systemstack(traceGoStart) 3082 } 3083 } 3084 // There's a cpu for us, so we can run. 3085 _g_.m.p.ptr().syscalltick++ 3086 // We need to cas the status and scan before resuming... 3087 casgstatus(_g_, _Gsyscall, _Grunning) 3088 3089 // Garbage collector isn't running (since we are), 3090 // so okay to clear syscallsp. 3091 _g_.syscallsp = 0 3092 _g_.m.locks-- 3093 if _g_.preempt { 3094 // restore the preemption request in case we've cleared it in newstack 3095 _g_.stackguard0 = stackPreempt 3096 } else { 3097 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 3098 _g_.stackguard0 = _g_.stack.lo + _StackGuard 3099 } 3100 _g_.throwsplit = false 3101 return 3102 } 3103 3104 _g_.sysexitticks = 0 3105 if trace.enabled { 3106 // Wait till traceGoSysBlock event is emitted. 3107 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3108 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 3109 osyield() 3110 } 3111 // We can't trace syscall exit right now because we don't have a P. 3112 // Tracing code can invoke write barriers that cannot run without a P. 3113 // So instead we remember the syscall exit time and emit the event 3114 // in execute when we have a P. 3115 _g_.sysexitticks = cputicks() 3116 } 3117 3118 _g_.m.locks-- 3119 3120 // Call the scheduler. 3121 mcall(exitsyscall0) 3122 3123 if _g_.m.mcache == nil { 3124 systemstack(func() { 3125 throw("lost mcache") 3126 }) 3127 } 3128 3129 // Scheduler returned, so we're allowed to run now. 3130 // Delete the syscallsp information that we left for 3131 // the garbage collector during the system call. 3132 // Must wait until now because until gosched returns 3133 // we don't know for sure that the garbage collector 3134 // is not running. 3135 _g_.syscallsp = 0 3136 _g_.m.p.ptr().syscalltick++ 3137 _g_.throwsplit = false 3138 } 3139 3140 //go:nosplit 3141 func exitsyscallfast() bool { 3142 _g_ := getg() 3143 3144 // Freezetheworld sets stopwait but does not retake P's. 3145 if sched.stopwait == freezeStopWait { 3146 _g_.m.mcache = nil 3147 _g_.m.p = 0 3148 return false 3149 } 3150 3151 // Try to re-acquire the last P. 3152 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 3153 // There's a cpu for us, so we can run. 3154 exitsyscallfast_reacquired() 3155 return true 3156 } 3157 3158 // Try to get any other idle P. 3159 oldp := _g_.m.p.ptr() 3160 _g_.m.mcache = nil 3161 _g_.m.p = 0 3162 if sched.pidle != 0 { 3163 var ok bool 3164 systemstack(func() { 3165 ok = exitsyscallfast_pidle() 3166 if ok && trace.enabled { 3167 if oldp != nil { 3168 // Wait till traceGoSysBlock event is emitted. 3169 // This ensures consistency of the trace (the goroutine is started after it is blocked). 
3170 for oldp.syscalltick == _g_.m.syscalltick { 3171 osyield() 3172 } 3173 } 3174 traceGoSysExit(0) 3175 } 3176 }) 3177 if ok { 3178 return true 3179 } 3180 } 3181 return false 3182 } 3183 3184 // exitsyscallfast_reacquired is the exitsyscall path on which this G 3185 // has successfully reacquired the P it was running on before the 3186 // syscall. 3187 // 3188 // This function is allowed to have write barriers because exitsyscall 3189 // has acquired a P at this point. 3190 // 3191 //go:yeswritebarrierrec 3192 //go:nosplit 3193 func exitsyscallfast_reacquired() { 3194 _g_ := getg() 3195 _g_.m.mcache = _g_.m.p.ptr().mcache 3196 _g_.m.p.ptr().m.set(_g_.m) 3197 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3198 if trace.enabled { 3199 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 3200 // traceGoSysBlock for this syscall was already emitted, 3201 // but here we effectively retake the p from the new syscall running on the same p. 3202 systemstack(func() { 3203 // Denote blocking of the new syscall. 3204 traceGoSysBlock(_g_.m.p.ptr()) 3205 // Denote completion of the current syscall. 3206 traceGoSysExit(0) 3207 }) 3208 } 3209 _g_.m.p.ptr().syscalltick++ 3210 } 3211 } 3212 3213 func exitsyscallfast_pidle() bool { 3214 lock(&sched.lock) 3215 _p_ := pidleget() 3216 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 3217 atomic.Store(&sched.sysmonwait, 0) 3218 notewakeup(&sched.sysmonnote) 3219 } 3220 unlock(&sched.lock) 3221 if _p_ != nil { 3222 acquirep(_p_) 3223 return true 3224 } 3225 return false 3226 } 3227 3228 // exitsyscall slow path on g0. 3229 // Failed to acquire P, enqueue gp as runnable. 3230 // 3231 //go:nowritebarrierrec 3232 func exitsyscall0(gp *g) { 3233 _g_ := getg() 3234 3235 casgstatus(gp, _Gsyscall, _Grunnable) 3236 dropg() 3237 lock(&sched.lock) 3238 _p_ := pidleget() 3239 if _p_ == nil { 3240 globrunqput(gp) 3241 } else if atomic.Load(&sched.sysmonwait) != 0 { 3242 atomic.Store(&sched.sysmonwait, 0) 3243 notewakeup(&sched.sysmonnote) 3244 } 3245 unlock(&sched.lock) 3246 if _p_ != nil { 3247 acquirep(_p_) 3248 execute(gp, false) // Never returns. 3249 } 3250 if _g_.m.lockedg != 0 { 3251 // Wait until another thread schedules gp and so m again. 3252 stoplockedm() 3253 execute(gp, false) // Never returns. 3254 } 3255 stopm() 3256 schedule() // Never returns. 3257 } 3258 3259 func beforefork() { 3260 gp := getg().m.curg 3261 3262 // Block signals during a fork, so that the child does not run 3263 // a signal handler before exec if a signal is sent to the process 3264 // group. See issue #18600. 3265 gp.m.locks++ 3266 msigsave(gp.m) 3267 sigblock() 3268 3269 // This function is called before fork in syscall package. 3270 // Code between fork and exec must not allocate memory nor even try to grow stack. 3271 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 3272 // runtime_AfterFork will undo this in parent process, but not in child. 3273 gp.stackguard0 = stackFork 3274 } 3275 3276 // Called from syscall package before fork. 3277 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 3278 //go:nosplit 3279 func syscall_runtime_BeforeFork() { 3280 systemstack(beforefork) 3281 } 3282 3283 func afterfork() { 3284 gp := getg().m.curg 3285 3286 // See the comments in beforefork. 3287 gp.stackguard0 = gp.stack.lo + _StackGuard 3288 3289 msigrestore(gp.m.sigmask) 3290 3291 gp.m.locks-- 3292 } 3293 3294 // Called from syscall package after fork in parent. 
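// Taken together, the fork hooks are used by the syscall package in roughly
// this order (a sketch; rawFork and rawExec are hypothetical stand-ins):
//
//	runtime_BeforeFork()
//	pid := rawFork()               // the child sees pid == 0
//	if pid == 0 {
//		runtime_AfterForkInChild() // see below
//		rawExec(argv0, argv, envv)
//	}
//	runtime_AfterFork()            // parent only
//
// Between BeforeFork and the exec the child must not allocate or grow its
// stack, which is why beforefork above spoils stackguard0 with stackFork.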
3295 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 3296 //go:nosplit 3297 func syscall_runtime_AfterFork() { 3298 systemstack(afterfork) 3299 } 3300 3301 // inForkedChild is true while manipulating signals in the child process. 3302 // This is used to avoid calling libc functions in case we are using vfork. 3303 var inForkedChild bool 3304 3305 // Called from syscall package after fork in child. 3306 // It resets non-sigignored signals to the default handler, and 3307 // restores the signal mask in preparation for the exec. 3308 // 3309 // Because this might be called during a vfork, and therefore may be 3310 // temporarily sharing address space with the parent process, this must 3311 // not change any global variables or calling into C code that may do so. 3312 // 3313 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 3314 //go:nosplit 3315 //go:nowritebarrierrec 3316 func syscall_runtime_AfterForkInChild() { 3317 // It's OK to change the global variable inForkedChild here 3318 // because we are going to change it back. There is no race here, 3319 // because if we are sharing address space with the parent process, 3320 // then the parent process can not be running concurrently. 3321 inForkedChild = true 3322 3323 clearSignalHandlers() 3324 3325 // When we are the child we are the only thread running, 3326 // so we know that nothing else has changed gp.m.sigmask. 3327 msigrestore(getg().m.sigmask) 3328 3329 inForkedChild = false 3330 } 3331 3332 // Called from syscall package before Exec. 3333 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 3334 func syscall_runtime_BeforeExec() { 3335 // Prevent thread creation during exec. 3336 execLock.lock() 3337 } 3338 3339 // Called from syscall package after Exec. 3340 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 3341 func syscall_runtime_AfterExec() { 3342 execLock.unlock() 3343 } 3344 3345 // Allocate a new g, with a stack big enough for stacksize bytes. 3346 func malg(stacksize int32) *g { 3347 newg := new(g) 3348 if stacksize >= 0 { 3349 stacksize = round2(_StackSystem + stacksize) 3350 systemstack(func() { 3351 newg.stack = stackalloc(uint32(stacksize)) 3352 }) 3353 newg.stackguard0 = newg.stack.lo + _StackGuard 3354 newg.stackguard1 = ^uintptr(0) 3355 } 3356 newg.isencl = isEnclave 3357 return newg 3358 } 3359 3360 // Create a new g running fn with siz bytes of arguments. 3361 // Put it on the queue of g's waiting to run. 3362 // The compiler turns a go statement into a call to this. 3363 // Cannot split the stack because it assumes that the arguments 3364 // are available sequentially after &fn; they would not be 3365 // copied if a stack split occurred. 3366 //go:nosplit 3367 func newproc(siz int32, fn *funcval) { 3368 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 3369 pc := getcallerpc() 3370 systemstack(func() { 3371 newproc1(fn, (*uint8)(argp), siz, pc) 3372 }) 3373 } 3374 3375 // Create a new g running fn with narg bytes of arguments starting 3376 // at argp. callerpc is the address of the go statement that created 3377 // this. The new g is put on the queue of g's waiting to run. 
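// For orientation (a rough sketch of the lowering, not literal compiler
// output): a statement such as
//
//	go add(1, 2)
//
// becomes, in effect,
//
//	newproc(16, addFuncval /* the arguments 1 and 2 follow fn on the stack */)
//
// where 16 is the argument size in bytes on 64-bit; newproc1 below copies
// those bytes onto the new goroutine's stack.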
3378 func newproc1(fn *funcval, argp *uint8, narg int32, callerpc uintptr) { 3379 _g_ := getg() 3380 3381 if fn == nil { 3382 _g_.m.throwing = -1 // do not dump full stacks 3383 throw("go of nil func value") 3384 } 3385 _g_.m.locks++ // disable preemption because it can be holding p in a local var 3386 siz := narg 3387 siz = (siz + 7) &^ 7 3388 3389 // We could allocate a larger initial stack if necessary. 3390 // Not worth it: this is almost always an error. 3391 // 4*sizeof(uintreg): extra space added below 3392 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 3393 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 3394 throw("newproc: function arguments too large for new goroutine") 3395 } 3396 3397 _p_ := _g_.m.p.ptr() 3398 newg := gfget(_p_) 3399 if newg == nil { 3400 newg = malg(_StackMin) 3401 casgstatus(newg, _Gidle, _Gdead) 3402 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 3403 } 3404 if newg.stack.hi == 0 { 3405 throw("newproc1: newg missing stack") 3406 } 3407 3408 if readgstatus(newg) != _Gdead { 3409 throw("newproc1: new g is not Gdead") 3410 } 3411 3412 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 3413 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 3414 sp := newg.stack.hi - totalSize 3415 spArg := sp 3416 if usesLR { 3417 // caller's LR 3418 *(*uintptr)(unsafe.Pointer(sp)) = 0 3419 prepGoExitFrame(sp) 3420 spArg += sys.MinFrameSize 3421 } 3422 if narg > 0 { 3423 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 3424 // This is a stack-to-stack copy. If write barriers 3425 // are enabled and the source stack is grey (the 3426 // destination is always black), then perform a 3427 // barrier copy. We do this *after* the memmove 3428 // because the destination stack may have garbage on 3429 // it. 3430 if writeBarrier.needed && !_g_.m.curg.gcscandone { 3431 f := findfunc(fn.fn) 3432 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 3433 // We're in the prologue, so it's always stack map index 0. 3434 bv := stackmapdata(stkmap, 0) 3435 bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata) 3436 } 3437 } 3438 3439 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 3440 newg.sched.sp = sp 3441 newg.stktopsp = sp 3442 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 3443 newg.sched.g = guintptr(unsafe.Pointer(newg)) 3444 gostartcallfn(&newg.sched, fn) 3445 newg.gopc = callerpc 3446 newg.startpc = fn.fn 3447 if _g_.m.curg != nil { 3448 newg.labels = _g_.m.curg.labels 3449 } 3450 if isSystemGoroutine(newg) { 3451 atomic.Xadd(&sched.ngsys, +1) 3452 } 3453 newg.gcscanvalid = false 3454 casgstatus(newg, _Gdead, _Grunnable) 3455 3456 if _p_.goidcache == _p_.goidcacheend { 3457 // Sched.goidgen is the last allocated id, 3458 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 3459 // At startup sched.goidgen=0, so main goroutine receives goid=1. 
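// Worked example, assuming _GoidCacheBatch is 16 (its value elsewhere in the
// runtime): the first P to refill does Xadd64(&sched.goidgen, 16), which
// returns 16, so goidcache = 16 - 15 = 1 and goidcacheend = 17. That P hands
// out goids 1..16 locally (the main goroutine gets 1), the next refill by any
// P gets 17..32, and so on, without touching sched.goidgen per goroutine.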
3460 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 3461 _p_.goidcache -= _GoidCacheBatch - 1 3462 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 3463 } 3464 newg.goid = int64(_p_.goidcache) 3465 _p_.goidcache++ 3466 if raceenabled { 3467 newg.racectx = racegostart(callerpc) 3468 } 3469 if trace.enabled { 3470 traceGoCreate(newg, newg.startpc) 3471 } 3472 runqput(_p_, newg, true) 3473 3474 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { 3475 wakep() 3476 } 3477 _g_.m.locks-- 3478 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 3479 _g_.stackguard0 = stackPreempt 3480 } 3481 } 3482 3483 // Put on gfree list. 3484 // If local list is too long, transfer a batch to the global list. 3485 func gfput(_p_ *p, gp *g) { 3486 if readgstatus(gp) != _Gdead { 3487 throw("gfput: bad status (not Gdead)") 3488 } 3489 3490 stksize := gp.stack.hi - gp.stack.lo 3491 3492 if stksize != _FixedStack { 3493 // non-standard stack size - free it. 3494 stackfree(gp.stack) 3495 gp.stack.lo = 0 3496 gp.stack.hi = 0 3497 gp.stackguard0 = 0 3498 } 3499 3500 gp.schedlink.set(_p_.gfree) 3501 _p_.gfree = gp 3502 _p_.gfreecnt++ 3503 if _p_.gfreecnt >= 64 { 3504 lock(&sched.gflock) 3505 for _p_.gfreecnt >= 32 { 3506 _p_.gfreecnt-- 3507 gp = _p_.gfree 3508 _p_.gfree = gp.schedlink.ptr() 3509 if gp.stack.lo == 0 { 3510 gp.schedlink.set(sched.gfreeNoStack) 3511 sched.gfreeNoStack = gp 3512 } else { 3513 gp.schedlink.set(sched.gfreeStack) 3514 sched.gfreeStack = gp 3515 } 3516 sched.ngfree++ 3517 } 3518 unlock(&sched.gflock) 3519 } 3520 } 3521 3522 // Get from gfree list. 3523 // If local list is empty, grab a batch from global list. 3524 func gfget(_p_ *p) *g { 3525 retry: 3526 gp := _p_.gfree 3527 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 3528 lock(&sched.gflock) 3529 for _p_.gfreecnt < 32 { 3530 if sched.gfreeStack != nil { 3531 // Prefer Gs with stacks. 3532 gp = sched.gfreeStack 3533 sched.gfreeStack = gp.schedlink.ptr() 3534 } else if sched.gfreeNoStack != nil { 3535 gp = sched.gfreeNoStack 3536 sched.gfreeNoStack = gp.schedlink.ptr() 3537 } else { 3538 break 3539 } 3540 _p_.gfreecnt++ 3541 sched.ngfree-- 3542 gp.schedlink.set(_p_.gfree) 3543 _p_.gfree = gp 3544 } 3545 unlock(&sched.gflock) 3546 goto retry 3547 } 3548 if gp != nil { 3549 _p_.gfree = gp.schedlink.ptr() 3550 _p_.gfreecnt-- 3551 if gp.stack.lo == 0 { 3552 // Stack was deallocated in gfput. Allocate a new one. 3553 systemstack(func() { 3554 gp.stack = stackalloc(_FixedStack) 3555 }) 3556 gp.stackguard0 = gp.stack.lo + _StackGuard 3557 } else { 3558 if raceenabled { 3559 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3560 } 3561 if msanenabled { 3562 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3563 } 3564 } 3565 } 3566 return gp 3567 } 3568 3569 // Purge all cached G's from gfree list to the global list. 3570 func gfpurge(_p_ *p) { 3571 lock(&sched.gflock) 3572 for _p_.gfreecnt != 0 { 3573 _p_.gfreecnt-- 3574 gp := _p_.gfree 3575 _p_.gfree = gp.schedlink.ptr() 3576 if gp.stack.lo == 0 { 3577 gp.schedlink.set(sched.gfreeNoStack) 3578 sched.gfreeNoStack = gp 3579 } else { 3580 gp.schedlink.set(sched.gfreeStack) 3581 sched.gfreeStack = gp 3582 } 3583 sched.ngfree++ 3584 } 3585 unlock(&sched.gflock) 3586 } 3587 3588 // Breakpoint executes a breakpoint trap. 
3589 func Breakpoint() { 3590 breakpoint() 3591 } 3592 3593 // dolockOSThread is called by LockOSThread and lockOSThread below 3594 // after they modify m.locked. Do not allow preemption during this call, 3595 // or else the m might be different in this function than in the caller. 3596 //go:nosplit 3597 func dolockOSThread() { 3598 _g_ := getg() 3599 _g_.m.lockedg.set(_g_) 3600 _g_.lockedm.set(_g_.m) 3601 } 3602 3603 //go:nosplit 3604 3605 // LockOSThread wires the calling goroutine to its current operating system thread. 3606 // The calling goroutine will always execute in that thread, 3607 // and no other goroutine will execute in it, 3608 // until the calling goroutine has made as many calls to 3609 // UnlockOSThread as to LockOSThread. 3610 // If the calling goroutine exits without unlocking the thread, 3611 // the thread will be terminated. 3612 // 3613 // A goroutine should call LockOSThread before calling OS services or 3614 // non-Go library functions that depend on per-thread state. 3615 func LockOSThread() { 3616 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { 3617 // If we need to start a new thread from the locked 3618 // thread, we need the template thread. Start it now 3619 // while we're in a known-good state. 3620 startTemplateThread() 3621 } 3622 _g_ := getg() 3623 _g_.m.lockedExt++ 3624 if _g_.m.lockedExt == 0 { 3625 _g_.m.lockedExt-- 3626 panic("LockOSThread nesting overflow") 3627 } 3628 dolockOSThread() 3629 } 3630 3631 //go:nosplit 3632 func lockOSThread() { 3633 getg().m.lockedInt++ 3634 dolockOSThread() 3635 } 3636 3637 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 3638 // after they update m->locked. Do not allow preemption during this call, 3639 // or else the m might be in different in this function than in the caller. 3640 //go:nosplit 3641 func dounlockOSThread() { 3642 _g_ := getg() 3643 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { 3644 return 3645 } 3646 _g_.m.lockedg = 0 3647 _g_.lockedm = 0 3648 } 3649 3650 //go:nosplit 3651 3652 // UnlockOSThread undoes an earlier call to LockOSThread. 3653 // If this drops the number of active LockOSThread calls on the 3654 // calling goroutine to zero, it unwires the calling goroutine from 3655 // its fixed operating system thread. 3656 // If there are no active LockOSThread calls, this is a no-op. 3657 // 3658 // Before calling UnlockOSThread, the caller must ensure that the OS 3659 // thread is suitable for running other goroutines. If the caller made 3660 // any permanent changes to the state of the thread that would affect 3661 // other goroutines, it should not call this function and thus leave 3662 // the goroutine locked to the OS thread until the goroutine (and 3663 // hence the thread) exits. 3664 func UnlockOSThread() { 3665 _g_ := getg() 3666 if _g_.m.lockedExt == 0 { 3667 return 3668 } 3669 _g_.m.lockedExt-- 3670 dounlockOSThread() 3671 } 3672 3673 //go:nosplit 3674 func unlockOSThread() { 3675 _g_ := getg() 3676 if _g_.m.lockedInt == 0 { 3677 systemstack(badunlockosthread) 3678 } 3679 _g_.m.lockedInt-- 3680 dounlockOSThread() 3681 } 3682 3683 func badunlockosthread() { 3684 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 3685 } 3686 3687 func gcount() int32 { 3688 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 3689 for _, _p_ := range allp { 3690 n -= _p_.gfreecnt 3691 } 3692 3693 // All these variables can be changed concurrently, so the result can be inconsistent. 
3694 // But at least the current goroutine is running. 3695 if n < 1 { 3696 n = 1 3697 } 3698 return n 3699 } 3700 3701 func mcount() int32 { 3702 return int32(sched.mnext - sched.nmfreed) 3703 } 3704 3705 var prof struct { 3706 signalLock uint32 3707 hz int32 3708 } 3709 3710 func _System() { _System() } 3711 func _ExternalCode() { _ExternalCode() } 3712 func _LostExternalCode() { _LostExternalCode() } 3713 func _GC() { _GC() } 3714 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 3715 3716 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le} 3717 var lostAtomic64Count uint64 3718 3719 // Called if we receive a SIGPROF signal. 3720 // Called by the signal handler, may run during STW. 3721 //go:nowritebarrierrec 3722 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3723 if prof.hz == 0 { 3724 return 3725 } 3726 3727 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 3728 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 3729 // the critical section, it creates a deadlock (when writing the sample). 3730 // As a workaround, create a counter of SIGPROFs while in critical section 3731 // to store the count, and pass it to sigprof.add() later when SIGPROF is 3732 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 3733 if GOARCH == "mips" || GOARCH == "mipsle" { 3734 if f := findfunc(pc); f.valid() { 3735 if hasprefix(funcname(f), "runtime/internal/atomic") { 3736 lostAtomic64Count++ 3737 return 3738 } 3739 } 3740 } 3741 3742 // Profiling runs concurrently with GC, so it must not allocate. 3743 // Set a trap in case the code does allocate. 3744 // Note that on windows, one thread takes profiles of all the 3745 // other threads, so mp is usually not getg().m. 3746 // In fact mp may not even be stopped. 3747 // See golang.org/issue/17165. 3748 getg().m.mallocing++ 3749 3750 // Define that a "user g" is a user-created goroutine, and a "system g" 3751 // is one that is m->g0 or m->gsignal. 3752 // 3753 // We might be interrupted for profiling halfway through a 3754 // goroutine switch. The switch involves updating three (or four) values: 3755 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3756 // because once it gets updated the new g is running. 3757 // 3758 // When switching from a user g to a system g, LR is not considered live, 3759 // so the update only affects g, SP, and PC. Since PC must be last, there 3760 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3761 // (2) both g and SP are updated, and (3) SP alone is updated. 3762 // If SP or g alone is updated, we can detect the partial transition by checking 3763 // whether the SP is within g's stack bounds. (We could also require that SP 3764 // be changed only after g, but the stack bounds check is needed by other 3765 // cases, so there is no need to impose an additional requirement.) 3766 // 3767 // There is one exceptional transition to a system g, not in ordinary execution. 3768 // When a signal arrives, the operating system starts the signal handler running 3769 // with an updated PC and SP. The g is updated last, at the beginning of the 3770 // handler. There are two reasons this is okay. First, until g is updated the 3771 // g and SP do not match, so the stack bounds check detects the partial transition. 3772 // Second, signal handlers currently run with signals disabled, so a profiling 3773 // signal cannot arrive during the handler. 
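// (The "stack bounds check" referred to throughout this comment is the test
// performed further below before collecting the traceback: the sample is only
// trusted when the sampled SP lies inside the sampled g's stack, i.e.
//
//	traceback := true
//	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
//		traceback = false
//	}
//
// so any partial g/SP transition simply causes the sample to be recorded
// without a Go traceback.)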
3774 // 3775 // When switching from a system g to a user g, there are three possibilities. 3776 // 3777 // First, it may be that the g switch has no PC update, because the SP 3778 // either corresponds to a user g throughout (as in asmcgocall) 3779 // or because it has been arranged to look like a user g frame 3780 // (as in cgocallback_gofunc). In this case, since the entire 3781 // transition is a g+SP update, a partial transition updating just one of 3782 // those will be detected by the stack bounds check. 3783 // 3784 // Second, when returning from a signal handler, the PC and SP updates 3785 // are performed by the operating system in an atomic update, so the g 3786 // update must be done before them. The stack bounds check detects 3787 // the partial transition here, and (again) signal handlers run with signals 3788 // disabled, so a profiling signal cannot arrive then anyway. 3789 // 3790 // Third, the common case: it may be that the switch updates g, SP, and PC 3791 // separately. If the PC is within any of the functions that does this, 3792 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3793 // 3794 // There is another apparently viable approach, recorded here in case 3795 // the "PC within setsSP function" check turns out not to be usable. 3796 // It would be possible to delay the update of either g or SP until immediately 3797 // before the PC update instruction. Then, because of the stack bounds check, 3798 // the only problematic interrupt point is just before that PC update instruction, 3799 // and the sigprof handler can detect that instruction and simulate stepping past 3800 // it in order to reach a consistent state. On ARM, the update of g must be made 3801 // in two places (in R10 and also in a TLS slot), so the delayed update would 3802 // need to be the SP update. The sigprof handler must read the instruction at 3803 // the current PC and if it was the known instruction (for example, JMP BX or 3804 // MOV R2, PC), use that other register in place of the PC value. 3805 // The biggest drawback to this solution is that it requires that we can tell 3806 // whether it's safe to read from the memory pointed at by PC. 3807 // In a correct program, we can test PC == nil and otherwise read, 3808 // but if a profiling signal happens at the instant that a program executes 3809 // a bad jump (before the program manages to handle the resulting fault) 3810 // the profiling handler could fault trying to read nonexistent memory. 3811 // 3812 // To recap, there are no constraints on the assembly being used for the 3813 // transition. We simply require that g and SP match and that the PC is not 3814 // in gogo. 3815 traceback := true 3816 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3817 traceback = false 3818 } 3819 var stk [maxCPUProfStack]uintptr 3820 n := 0 3821 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3822 cgoOff := 0 3823 // Check cgoCallersUse to make sure that we are not 3824 // interrupting other code that is fiddling with 3825 // cgoCallers. We are running in a signal handler 3826 // with all signals blocked, so we don't have to worry 3827 // about any other code interrupting us. 
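// The block below first copies any C frames recorded by the cgo traceback
// machinery (mp.cgoCallers) into the front of stk, then appends the Go frames
// leading up to the cgo call by unwinding from the goroutine's saved
// syscallpc/syscallsp.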
3828 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3829 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3830 cgoOff++ 3831 } 3832 copy(stk[:], mp.cgoCallers[:cgoOff]) 3833 mp.cgoCallers[0] = 0 3834 } 3835 3836 // Collect Go stack that leads to the cgo call. 3837 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3838 } else if traceback { 3839 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3840 } 3841 3842 if n <= 0 { 3843 // Normal traceback is impossible or has failed. 3844 // See if it falls into several common cases. 3845 n = 0 3846 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3847 // Libcall, i.e. runtime syscall on windows. 3848 // Collect Go stack that leads to the call. 3849 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3850 } 3851 if n == 0 { 3852 // If all of the above has failed, account it against abstract "System" or "GC". 3853 n = 2 3854 // "ExternalCode" is better than "etext". 3855 if pc > firstmoduledata.etext { 3856 pc = funcPC(_ExternalCode) + sys.PCQuantum 3857 } 3858 stk[0] = pc 3859 if mp.preemptoff != "" || mp.helpgc != 0 { 3860 stk[1] = funcPC(_GC) + sys.PCQuantum 3861 } else { 3862 stk[1] = funcPC(_System) + sys.PCQuantum 3863 } 3864 } 3865 } 3866 3867 if prof.hz != 0 { 3868 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 { 3869 cpuprof.addLostAtomic64(lostAtomic64Count) 3870 lostAtomic64Count = 0 3871 } 3872 cpuprof.add(gp, stk[:n]) 3873 } 3874 getg().m.mallocing-- 3875 } 3876 3877 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3878 // it tries to collect a traceback into sigprofCallers. 3879 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3880 var sigprofCallers cgoCallers 3881 var sigprofCallersUse uint32 3882 3883 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3884 // and the signal handler collected a stack trace in sigprofCallers. 3885 // When this is called, sigprofCallersUse will be non-zero. 3886 // g is nil, and what we can do is very limited. 3887 //go:nosplit 3888 //go:nowritebarrierrec 3889 func sigprofNonGo() { 3890 if prof.hz != 0 { 3891 n := 0 3892 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3893 n++ 3894 } 3895 cpuprof.addNonGo(sigprofCallers[:n]) 3896 } 3897 3898 atomic.Store(&sigprofCallersUse, 0) 3899 } 3900 3901 // sigprofNonGoPC is called when a profiling signal arrived on a 3902 // non-Go thread and we have a single PC value, not a stack trace. 3903 // g is nil, and what we can do is very limited. 3904 //go:nosplit 3905 //go:nowritebarrierrec 3906 func sigprofNonGoPC(pc uintptr) { 3907 if prof.hz != 0 { 3908 stk := []uintptr{ 3909 pc, 3910 funcPC(_ExternalCode) + sys.PCQuantum, 3911 } 3912 cpuprof.addNonGo(stk) 3913 } 3914 } 3915 3916 // Reports whether a function will set the SP 3917 // to an absolute value. Important that 3918 // we don't traceback when these are at the bottom 3919 // of the stack since we can't be sure that we will 3920 // find the caller. 3921 // 3922 // If the function is not on the bottom of the stack 3923 // we assume that it will have set it up so that traceback will be consistent, 3924 // either by being a traceback terminating function 3925 // or putting one on the stack at the right offset. 
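// In this runtime version that set is exactly the functions tagged with a
// special funcID below (gogo, systemstack, mcall, morestack), all of which
// switch stacks and therefore install SP from a saved absolute value; a PC
// whose function cannot be found is treated conservatively the same way.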
3926 func setsSP(pc uintptr) bool { 3927 f := findfunc(pc) 3928 if !f.valid() { 3929 // couldn't find the function for this PC, 3930 // so assume the worst and stop traceback 3931 return true 3932 } 3933 switch f.funcID { 3934 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack: 3935 return true 3936 } 3937 return false 3938 } 3939 3940 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3941 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3942 func setcpuprofilerate(hz int32) { 3943 // Force sane arguments. 3944 if hz < 0 { 3945 hz = 0 3946 } 3947 3948 // Disable preemption, otherwise we can be rescheduled to another thread 3949 // that has profiling enabled. 3950 _g_ := getg() 3951 _g_.m.locks++ 3952 3953 // Stop profiler on this thread so that it is safe to lock prof. 3954 // if a profiling signal came in while we had prof locked, 3955 // it would deadlock. 3956 setThreadCPUProfiler(0) 3957 3958 for !atomic.Cas(&prof.signalLock, 0, 1) { 3959 osyield() 3960 } 3961 if prof.hz != hz { 3962 setProcessCPUProfiler(hz) 3963 prof.hz = hz 3964 } 3965 atomic.Store(&prof.signalLock, 0) 3966 3967 lock(&sched.lock) 3968 sched.profilehz = hz 3969 unlock(&sched.lock) 3970 3971 if hz != 0 { 3972 setThreadCPUProfiler(hz) 3973 } 3974 3975 _g_.m.locks-- 3976 } 3977 3978 // Change number of processors. The world is stopped, sched is locked. 3979 // gcworkbufs are not being modified by either the GC or 3980 // the write barrier code. 3981 // Returns list of Ps with local work, they need to be scheduled by the caller. 3982 func procresize(nprocs int32) *p { 3983 old := gomaxprocs 3984 if old < 0 || nprocs <= 0 { 3985 throw("procresize: invalid arg") 3986 } 3987 if trace.enabled && !isEnclave { 3988 traceGomaxprocs(nprocs) 3989 } 3990 3991 // update statistics 3992 now := nanotime() 3993 if sched.procresizetime != 0 { 3994 sched.totaltime += int64(old) * (now - sched.procresizetime) 3995 } 3996 sched.procresizetime = now 3997 3998 // Grow allp if necessary. 3999 if nprocs > int32(len(allp)) { 4000 // Synchronize with retake, which could be running 4001 // concurrently since it doesn't run on a P. 4002 lock(&allpLock) 4003 if nprocs <= int32(cap(allp)) { 4004 allp = allp[:nprocs] 4005 } else { 4006 nallp := make([]*p, nprocs) 4007 // Copy everything up to allp's cap so we 4008 // never lose old allocated Ps. 4009 copy(nallp, allp[:cap(allp)]) 4010 allp = nallp 4011 } 4012 unlock(&allpLock) 4013 } 4014 4015 // initialize new P's 4016 for i := int32(0); i < nprocs; i++ { 4017 pp := allp[i] 4018 if pp == nil { 4019 pp = new(p) 4020 pp.id = i 4021 pp.status = _Pgcstop 4022 pp.sudogcache = pp.sudogbuf[:0] 4023 for i := range pp.deferpool { 4024 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 4025 } 4026 pp.wbBuf.reset() 4027 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 4028 } 4029 if pp.mcache == nil { 4030 if old == 0 && i == 0 { 4031 if getg().m.mcache == nil { 4032 throw("missing mcache?") 4033 } 4034 pp.mcache = getg().m.mcache // bootstrap 4035 } else { 4036 pp.mcache = allocmcache() 4037 } 4038 } 4039 if raceenabled && pp.racectx == 0 { 4040 if old == 0 && i == 0 { 4041 pp.racectx = raceprocctx0 4042 raceprocctx0 = 0 // bootstrap 4043 } else { 4044 pp.racectx = raceproccreate() 4045 } 4046 } 4047 } 4048 4049 // free unused P's 4050 for i := nprocs; i < old; i++ { 4051 p := allp[i] 4052 if trace.enabled && p == getg().m.p.ptr() { 4053 // moving to p[0], pretend that we were descheduled 4054 // and then scheduled again to keep the trace sane. 
4055 traceGoSched() 4056 traceProcStop(p) 4057 } 4058 // move all runnable goroutines to the global queue 4059 for p.runqhead != p.runqtail { 4060 // pop from tail of local queue 4061 p.runqtail-- 4062 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 4063 // push onto head of global queue 4064 globrunqputhead(gp) 4065 } 4066 if p.runnext != 0 { 4067 globrunqputhead(p.runnext.ptr()) 4068 p.runnext = 0 4069 } 4070 // if there's a background worker, make it runnable and put 4071 // it on the global queue so it can clean itself up 4072 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 4073 casgstatus(gp, _Gwaiting, _Grunnable) 4074 if trace.enabled { 4075 traceGoUnpark(gp, 0) 4076 } 4077 globrunqput(gp) 4078 // This assignment doesn't race because the 4079 // world is stopped. 4080 p.gcBgMarkWorker.set(nil) 4081 } 4082 // Flush p's write barrier buffer. 4083 if gcphase != _GCoff { 4084 wbBufFlush1(p) 4085 p.gcw.dispose() 4086 } 4087 for i := range p.sudogbuf { 4088 p.sudogbuf[i] = nil 4089 } 4090 p.sudogcache = p.sudogbuf[:0] 4091 for i := range p.deferpool { 4092 for j := range p.deferpoolbuf[i] { 4093 p.deferpoolbuf[i][j] = nil 4094 } 4095 p.deferpool[i] = p.deferpoolbuf[i][:0] 4096 } 4097 freemcache(p.mcache) 4098 p.mcache = nil 4099 gfpurge(p) 4100 traceProcFree(p) 4101 if raceenabled { 4102 raceprocdestroy(p.racectx) 4103 p.racectx = 0 4104 } 4105 p.gcAssistTime = 0 4106 p.status = _Pdead 4107 // can't free P itself because it can be referenced by an M in syscall 4108 } 4109 4110 // Trim allp. 4111 if int32(len(allp)) != nprocs { 4112 lock(&allpLock) 4113 allp = allp[:nprocs] 4114 unlock(&allpLock) 4115 } 4116 4117 _g_ := getg() 4118 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4119 // continue to use the current P 4120 _g_.m.p.ptr().status = _Prunning 4121 } else { 4122 // release the current P and acquire allp[0] 4123 if _g_.m.p != 0 { 4124 _g_.m.p.ptr().m = 0 4125 } 4126 _g_.m.p = 0 4127 _g_.m.mcache = nil 4128 p := allp[0] 4129 p.m = 0 4130 p.status = _Pidle 4131 acquirep(p) 4132 if trace.enabled { 4133 traceGoStart() 4134 } 4135 } 4136 var runnablePs *p 4137 for i := nprocs - 1; i >= 0; i-- { 4138 p := allp[i] 4139 if _g_.m.p.ptr() == p { 4140 continue 4141 } 4142 p.status = _Pidle 4143 if runqempty(p) { 4144 pidleput(p) 4145 } else { 4146 p.m.set(mget()) 4147 p.link.set(runnablePs) 4148 runnablePs = p 4149 } 4150 } 4151 stealOrder.reset(uint32(nprocs)) 4152 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 4153 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 4154 return runnablePs 4155 } 4156 4157 // Associate p and the current m. 4158 // 4159 // This function is allowed to have write barriers even if the caller 4160 // isn't because it immediately acquires _p_. 4161 // 4162 //go:yeswritebarrierrec 4163 func acquirep(_p_ *p) { 4164 // Do the part that isn't allowed to have write barriers. 4165 acquirep1(_p_) 4166 4167 // have p; write barriers now allowed 4168 _g_ := getg() 4169 _g_.m.mcache = _p_.mcache 4170 4171 if trace.enabled { 4172 traceProcStart() 4173 } 4174 } 4175 4176 // acquirep1 is the first step of acquirep, which actually acquires 4177 // _p_. This is broken out so we can disallow write barriers for this 4178 // part, since we don't yet have a P. 
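// acquirep1 only establishes the association (_g_.m.p and _p_.m) and moves the
// P to _Prunning; the mcache hookup and the trace event happen back in
// acquirep once write barriers are allowed again. releasep below is its
// inverse: it re-checks the association, emits traceProcStop if tracing, and
// returns the P to _Pidle.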
4179 // 4180 //go:nowritebarrierrec 4181 func acquirep1(_p_ *p) { 4182 _g_ := getg() 4183 4184 if _g_.m.p != 0 || _g_.m.mcache != nil { 4185 throw("acquirep: already in go") 4186 } 4187 if _p_.m != 0 || _p_.status != _Pidle { 4188 id := int64(0) 4189 if _p_.m != 0 { 4190 id = _p_.m.ptr().id 4191 } 4192 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 4193 throw("acquirep: invalid p state") 4194 } 4195 _g_.m.p.set(_p_) 4196 _p_.m.set(_g_.m) 4197 _p_.status = _Prunning 4198 } 4199 4200 // Disassociate p and the current m. 4201 func releasep() *p { 4202 _g_ := getg() 4203 4204 if _g_.m.p == 0 || _g_.m.mcache == nil { 4205 throw("releasep: invalid arg") 4206 } 4207 _p_ := _g_.m.p.ptr() 4208 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 4209 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 4210 throw("releasep: invalid p state") 4211 } 4212 if trace.enabled { 4213 traceProcStop(_g_.m.p.ptr()) 4214 } 4215 _g_.m.p = 0 4216 _g_.m.mcache = nil 4217 _p_.m = 0 4218 _p_.status = _Pidle 4219 return _p_ 4220 } 4221 4222 func incidlelocked(v int32) { 4223 lock(&sched.lock) 4224 sched.nmidlelocked += v 4225 if v > 0 { 4226 checkdead() 4227 } 4228 unlock(&sched.lock) 4229 } 4230 4231 // Check for deadlock situation. 4232 // The check is based on number of running M's, if 0 -> deadlock. 4233 // sched.lock must be held. 4234 func checkdead() { 4235 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 4236 // there are no running goroutines. The calling program is 4237 // assumed to be running. 4238 if islibrary || isarchive { 4239 return 4240 } 4241 4242 // If we are dying because of a signal caught on an already idle thread, 4243 // freezetheworld will cause all running threads to block. 4244 // And runtime will essentially enter into deadlock state, 4245 // except that there is a thread that will call exit soon. 4246 if panicking > 0 { 4247 return 4248 } 4249 4250 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 4251 if run > 0 { 4252 return 4253 } 4254 if run < 0 { 4255 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 4256 throw("checkdead: inconsistent counts") 4257 } 4258 4259 grunning := 0 4260 lock(&allglock) 4261 for i := 0; i < len(allgs); i++ { 4262 gp := allgs[i] 4263 if isSystemGoroutine(gp) { 4264 continue 4265 } 4266 s := readgstatus(gp) 4267 switch s &^ _Gscan { 4268 case _Gwaiting: 4269 grunning++ 4270 case _Grunnable, 4271 _Grunning, 4272 _Gsyscall: 4273 unlock(&allglock) 4274 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 4275 throw("checkdead: runnable g") 4276 } 4277 } 4278 unlock(&allglock) 4279 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 4280 throw("no goroutines (main called runtime.Goexit) - deadlock!") 4281 } 4282 4283 // Maybe jump time forward for playground. 4284 gp := timejump() 4285 if gp != nil { 4286 casgstatus(gp, _Gwaiting, _Grunnable) 4287 globrunqput(gp) 4288 _p_ := pidleget() 4289 if _p_ == nil { 4290 throw("checkdead: no p for timer") 4291 } 4292 mp := mget() 4293 if mp == nil { 4294 // There should always be a free M since 4295 // nothing is running. 
4296 throw("checkdead: no m for timer") 4297 } 4298 mp.nextp.set(_p_) 4299 notewakeup(&mp.park) 4300 return 4301 } 4302 4303 //getg().m.throwing = -1 // do not dump full stacks 4304 throw("all goroutines are asleep - deadlock!") 4305 } 4306 4307 // forcegcperiod is the maximum time in nanoseconds between garbage 4308 // collections. If we go this long without a garbage collection, one 4309 // is forced to run. 4310 // 4311 // This is a variable for testing purposes. It normally doesn't change. 4312 var forcegcperiod int64 = 2 * 60 * 1e9 4313 4314 // Always runs without a P, so write barriers are not allowed. 4315 // 4316 //go:nowritebarrierrec 4317 func sysmon() { 4318 lock(&sched.lock) 4319 sched.nmsys++ 4320 checkdead() 4321 unlock(&sched.lock) 4322 4323 // If a heap span goes unused for 5 minutes after a garbage collection, 4324 // we hand it back to the operating system. 4325 scavengelimit := int64(5 * 60 * 1e9) 4326 4327 if debug.scavenge > 0 { 4328 // Scavenge-a-lot for testing. 4329 forcegcperiod = 10 * 1e6 4330 scavengelimit = 20 * 1e6 4331 } 4332 4333 lastscavenge := nanotime() 4334 nscavenge := 0 4335 4336 lasttrace := int64(0) 4337 idle := 0 // how many cycles in succession we had not wokeup somebody 4338 delay := uint32(0) 4339 for { 4340 if idle == 0 { // start with 20us sleep... 4341 delay = 20 4342 } else if idle > 50 { // start doubling the sleep after 1ms... 4343 delay *= 2 4344 } 4345 if delay > 10*1000 { // up to 10ms 4346 delay = 10 * 1000 4347 } 4348 usleep(delay) 4349 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 4350 lock(&sched.lock) 4351 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 4352 atomic.Store(&sched.sysmonwait, 1) 4353 unlock(&sched.lock) 4354 // Make wake-up period small enough 4355 // for the sampling to be correct. 4356 maxsleep := forcegcperiod / 2 4357 if scavengelimit < forcegcperiod { 4358 maxsleep = scavengelimit / 2 4359 } 4360 shouldRelax := true 4361 if osRelaxMinNS > 0 { 4362 next := timeSleepUntil() 4363 now := nanotime() 4364 if next-now < osRelaxMinNS { 4365 shouldRelax = false 4366 } 4367 } 4368 if shouldRelax { 4369 osRelax(true) 4370 } 4371 notetsleep(&sched.sysmonnote, maxsleep) 4372 if shouldRelax { 4373 osRelax(false) 4374 } 4375 lock(&sched.lock) 4376 atomic.Store(&sched.sysmonwait, 0) 4377 noteclear(&sched.sysmonnote) 4378 idle = 0 4379 delay = 20 4380 } 4381 unlock(&sched.lock) 4382 } 4383 // trigger libc interceptors if needed 4384 if *cgo_yield != nil { 4385 asmcgocall(*cgo_yield, nil) 4386 } 4387 // poll network if not polled for more than 10ms 4388 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 4389 now := nanotime() 4390 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 4391 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 4392 gp := netpoll(false) // non-blocking - returns list of goroutines 4393 if gp != nil { 4394 // Need to decrement number of idle locked M's 4395 // (pretending that one more is running) before injectglist. 4396 // Otherwise it can lead to the following situation: 4397 // injectglist grabs all P's but before it starts M's to run the P's, 4398 // another M returns from syscall, finishes running its G, 4399 // observes that there is no work to do and no other running M's 4400 // and reports deadlock. 
4401 incidlelocked(-1) 4402 injectglist(gp) 4403 incidlelocked(1) 4404 } 4405 } 4406 // retake P's blocked in syscalls 4407 // and preempt long running G's 4408 if retake(now) != 0 { 4409 idle = 0 4410 } else { 4411 idle++ 4412 } 4413 // check if we need to force a GC 4414 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 4415 lock(&forcegc.lock) 4416 forcegc.idle = 0 4417 forcegc.g.schedlink = 0 4418 injectglist(forcegc.g) 4419 unlock(&forcegc.lock) 4420 } 4421 // scavenge heap once in a while 4422 if lastscavenge+scavengelimit/2 < now { 4423 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 4424 lastscavenge = now 4425 nscavenge++ 4426 } 4427 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 4428 lasttrace = now 4429 schedtrace(debug.scheddetail > 0) 4430 } 4431 } 4432 } 4433 4434 type sysmontick struct { 4435 schedtick uint32 4436 schedwhen int64 4437 syscalltick uint32 4438 syscallwhen int64 4439 } 4440 4441 // forcePreemptNS is the time slice given to a G before it is 4442 // preempted. 4443 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 4444 4445 func retake(now int64) uint32 { 4446 n := 0 4447 // Prevent allp slice changes. This lock will be completely 4448 // uncontended unless we're already stopping the world. 4449 lock(&allpLock) 4450 // We can't use a range loop over allp because we may 4451 // temporarily drop the allpLock. Hence, we need to re-fetch 4452 // allp each time around the loop. 4453 for i := 0; i < len(allp); i++ { 4454 _p_ := allp[i] 4455 if _p_ == nil { 4456 // This can happen if procresize has grown 4457 // allp but not yet created new Ps. 4458 continue 4459 } 4460 pd := &_p_.sysmontick 4461 s := _p_.status 4462 if s == _Psyscall { 4463 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 4464 t := int64(_p_.syscalltick) 4465 if int64(pd.syscalltick) != t { 4466 pd.syscalltick = uint32(t) 4467 pd.syscallwhen = now 4468 continue 4469 } 4470 // On the one hand we don't want to retake Ps if there is no other work to do, 4471 // but on the other hand we want to retake them eventually 4472 // because they can prevent the sysmon thread from deep sleep. 4473 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 4474 continue 4475 } 4476 // Drop allpLock so we can take sched.lock. 4477 unlock(&allpLock) 4478 // Need to decrement number of idle locked M's 4479 // (pretending that one more is running) before the CAS. 4480 // Otherwise the M from which we retake can exit the syscall, 4481 // increment nmidle and report deadlock. 4482 incidlelocked(-1) 4483 if atomic.Cas(&_p_.status, s, _Pidle) { 4484 if trace.enabled { 4485 traceGoSysBlock(_p_) 4486 traceProcStop(_p_) 4487 } 4488 n++ 4489 _p_.syscalltick++ 4490 handoffp(_p_) 4491 } 4492 incidlelocked(1) 4493 lock(&allpLock) 4494 } else if s == _Prunning { 4495 // Preempt G if it's running for too long. 4496 t := int64(_p_.schedtick) 4497 if int64(pd.schedtick) != t { 4498 pd.schedtick = uint32(t) 4499 pd.schedwhen = now 4500 continue 4501 } 4502 if pd.schedwhen+forcePreemptNS > now { 4503 continue 4504 } 4505 preemptone(_p_) 4506 } 4507 } 4508 unlock(&allpLock) 4509 return uint32(n) 4510 } 4511 4512 // Tell all goroutines that they have been preempted and they should stop. 4513 // This function is purely best-effort. It can fail to inform a goroutine if a 4514 // processor just started running it. 4515 // No locks need to be held. 
4516 // Returns true if preemption request was issued to at least one goroutine. 4517 func preemptall() bool { 4518 res := false 4519 for _, _p_ := range allp { 4520 if _p_.status != _Prunning { 4521 continue 4522 } 4523 if preemptone(_p_) { 4524 res = true 4525 } 4526 } 4527 return res 4528 } 4529 4530 // Tell the goroutine running on processor P to stop. 4531 // This function is purely best-effort. It can incorrectly fail to inform the 4532 // goroutine. It can inform the wrong goroutine. Even if it informs the 4533 // correct goroutine, that goroutine might ignore the request if it is 4534 // simultaneously executing newstack. 4535 // No lock needs to be held. 4536 // Returns true if preemption request was issued. 4537 // The actual preemption will happen at some point in the future 4538 // and will be indicated by the gp->status no longer being 4539 // Grunning. 4540 func preemptone(_p_ *p) bool { 4541 mp := _p_.m.ptr() 4542 if mp == nil || mp == getg().m { 4543 return false 4544 } 4545 gp := mp.curg 4546 if gp == nil || gp == mp.g0 { 4547 return false 4548 } 4549 4550 gp.preempt = true 4551 4552 // Every call in a goroutine checks for stack overflow by 4553 // comparing the current stack pointer to gp->stackguard0. 4554 // Setting gp->stackguard0 to StackPreempt folds 4555 // preemption into the normal stack overflow check. 4556 gp.stackguard0 = stackPreempt 4557 return true 4558 } 4559 4560 var starttime int64 4561 4562 func schedtrace(detailed bool) { 4563 now := nanotime() 4564 if starttime == 0 { 4565 starttime = now 4566 } 4567 4568 lock(&sched.lock) 4569 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 4570 if detailed { 4571 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 4572 } 4573 // We must be careful while reading data from P's, M's and G's. 4574 // Even if we hold schedlock, most data can be changed concurrently. 4575 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
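// The loop below prints one entry per P: a full status line in detailed mode,
// otherwise just the local run queue length (runqtail-runqhead), with the
// whole vector wrapped in brackets as [len1 len2 ... lenN].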
4576 for i, _p_ := range allp { 4577 mp := _p_.m.ptr() 4578 h := atomic.Load(&_p_.runqhead) 4579 t := atomic.Load(&_p_.runqtail) 4580 if detailed { 4581 id := int64(-1) 4582 if mp != nil { 4583 id = mp.id 4584 } 4585 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 4586 } else { 4587 // In non-detailed mode format lengths of per-P run queues as: 4588 // [len1 len2 len3 len4] 4589 print(" ") 4590 if i == 0 { 4591 print("[") 4592 } 4593 print(t - h) 4594 if i == len(allp)-1 { 4595 print("]\n") 4596 } 4597 } 4598 } 4599 4600 if !detailed { 4601 unlock(&sched.lock) 4602 return 4603 } 4604 4605 for mp := allm; mp != nil; mp = mp.alllink { 4606 _p_ := mp.p.ptr() 4607 gp := mp.curg 4608 lockedg := mp.lockedg.ptr() 4609 id1 := int32(-1) 4610 if _p_ != nil { 4611 id1 = _p_.id 4612 } 4613 id2 := int64(-1) 4614 if gp != nil { 4615 id2 = gp.goid 4616 } 4617 id3 := int64(-1) 4618 if lockedg != nil { 4619 id3 = lockedg.goid 4620 } 4621 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 4622 } 4623 4624 lock(&allglock) 4625 for gi := 0; gi < len(allgs); gi++ { 4626 gp := allgs[gi] 4627 mp := gp.m 4628 lockedm := gp.lockedm.ptr() 4629 id1 := int64(-1) 4630 if mp != nil { 4631 id1 = mp.id 4632 } 4633 id2 := int64(-1) 4634 if lockedm != nil { 4635 id2 = lockedm.id 4636 } 4637 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 4638 } 4639 unlock(&allglock) 4640 unlock(&sched.lock) 4641 } 4642 4643 // Put mp on midle list. 4644 // Sched must be locked. 4645 // May run during STW, so write barriers are not allowed. 4646 //go:nowritebarrierrec 4647 func mput(mp *m) { 4648 mp.schedlink = sched.midle 4649 sched.midle.set(mp) 4650 sched.nmidle++ 4651 checkdead() 4652 } 4653 4654 // Try to get an m from midle list. 4655 // Sched must be locked. 4656 // May run during STW, so write barriers are not allowed. 4657 //go:nowritebarrierrec 4658 func mget() *m { 4659 mp := sched.midle.ptr() 4660 if mp != nil { 4661 sched.midle = mp.schedlink 4662 sched.nmidle-- 4663 } 4664 return mp 4665 } 4666 4667 // Put gp on the global runnable queue. 4668 // Sched must be locked. 4669 // May run during STW, so write barriers are not allowed. 4670 //go:nowritebarrierrec 4671 func globrunqput(gp *g) { 4672 gp.schedlink = 0 4673 if sched.runqtail != 0 { 4674 sched.runqtail.ptr().schedlink.set(gp) 4675 } else { 4676 sched.runqhead.set(gp) 4677 } 4678 sched.runqtail.set(gp) 4679 sched.runqsize++ 4680 } 4681 4682 // Put gp at the head of the global runnable queue. 4683 // Sched must be locked. 4684 // May run during STW, so write barriers are not allowed. 4685 //go:nowritebarrierrec 4686 func globrunqputhead(gp *g) { 4687 gp.schedlink = sched.runqhead 4688 sched.runqhead.set(gp) 4689 if sched.runqtail == 0 { 4690 sched.runqtail.set(gp) 4691 } 4692 sched.runqsize++ 4693 } 4694 4695 // Put a batch of runnable goroutines on the global runnable queue. 4696 // Sched must be locked. 
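// Unlike the per-P ring buffers further below, the global run queue is a plain
// singly linked list threaded through g.schedlink, guarded by sched.lock, with
// explicit head/tail pointers and a runqsize counter. A minimal standalone
// sketch of the same shape (illustrative only, not runtime code):
//
//	type node struct{ next *node }
//	type queue struct {
//		head, tail *node
//		size       int32
//	}
//	func (q *queue) pushBack(n *node) {
//		n.next = nil
//		if q.tail != nil {
//			q.tail.next = n
//		} else {
//			q.head = n
//		}
//		q.tail = n
//		q.size++
//	}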
4697 func globrunqputbatch(ghead *g, gtail *g, n int32) { 4698 gtail.schedlink = 0 4699 if sched.runqtail != 0 { 4700 sched.runqtail.ptr().schedlink.set(ghead) 4701 } else { 4702 sched.runqhead.set(ghead) 4703 } 4704 sched.runqtail.set(gtail) 4705 sched.runqsize += n 4706 } 4707 4708 // Try get a batch of G's from the global runnable queue. 4709 // Sched must be locked. 4710 func globrunqget(_p_ *p, max int32) *g { 4711 if sched.runqsize == 0 { 4712 return nil 4713 } 4714 4715 n := sched.runqsize/gomaxprocs + 1 4716 if n > sched.runqsize { 4717 n = sched.runqsize 4718 } 4719 if max > 0 && n > max { 4720 n = max 4721 } 4722 if n > int32(len(_p_.runq))/2 { 4723 n = int32(len(_p_.runq)) / 2 4724 } 4725 4726 sched.runqsize -= n 4727 if sched.runqsize == 0 { 4728 sched.runqtail = 0 4729 } 4730 4731 gp := sched.runqhead.ptr() 4732 sched.runqhead = gp.schedlink 4733 n-- 4734 for ; n > 0; n-- { 4735 gp1 := sched.runqhead.ptr() 4736 sched.runqhead = gp1.schedlink 4737 runqput(_p_, gp1, false) 4738 } 4739 return gp 4740 } 4741 4742 // Put p to on _Pidle list. 4743 // Sched must be locked. 4744 // May run during STW, so write barriers are not allowed. 4745 //go:nowritebarrierrec 4746 func pidleput(_p_ *p) { 4747 if !runqempty(_p_) { 4748 throw("pidleput: P has non-empty run queue") 4749 } 4750 _p_.link = sched.pidle 4751 sched.pidle.set(_p_) 4752 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 4753 } 4754 4755 // Try get a p from _Pidle list. 4756 // Sched must be locked. 4757 // May run during STW, so write barriers are not allowed. 4758 //go:nowritebarrierrec 4759 func pidleget() *p { 4760 _p_ := sched.pidle.ptr() 4761 if _p_ != nil { 4762 sched.pidle = _p_.link 4763 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 4764 } 4765 return _p_ 4766 } 4767 4768 // runqempty returns true if _p_ has no Gs on its local run queue. 4769 // It never returns true spuriously. 4770 func runqempty(_p_ *p) bool { 4771 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, 4772 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. 4773 // Simply observing that runqhead == runqtail and then observing that runqnext == nil 4774 // does not mean the queue is empty. 4775 for { 4776 head := atomic.Load(&_p_.runqhead) 4777 tail := atomic.Load(&_p_.runqtail) 4778 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) 4779 if tail == atomic.Load(&_p_.runqtail) { 4780 return head == tail && runnext == 0 4781 } 4782 } 4783 } 4784 4785 // To shake out latent assumptions about scheduling order, 4786 // we introduce some randomness into scheduling decisions 4787 // when running with the race detector. 4788 // The need for this was made obvious by changing the 4789 // (deterministic) scheduling order in Go 1.5 and breaking 4790 // many poorly-written tests. 4791 // With the randomness here, as long as the tests pass 4792 // consistently with -race, they shouldn't have latent scheduling 4793 // assumptions. 4794 const randomizeScheduler = raceenabled 4795 4796 // runqput tries to put g on the local runnable queue. 4797 // If next if false, runqput adds g to the tail of the runnable queue. 4798 // If next is true, runqput puts g in the _p_.runnext slot. 4799 // If the run queue is full, runnext puts g on the global queue. 4800 // Executed only by the owner P. 
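// The local queue itself is a fixed-size ring: runqhead/runqtail are
// free-running uint32 counters, only the owner P advances runqtail, and
// stealing Ps advance runqhead with a CAS. A minimal standalone sketch of the
// producer side (illustrative only; the type and field names are made up):
//
//	package main
//
//	import "sync/atomic"
//
//	type localq struct {
//		head uint32 // advanced by stealing Ps (with a CAS in the real code)
//		tail uint32 // advanced only by the owner P
//		buf  [256]func()
//	}
//
//	func (q *localq) put(g func()) bool {
//		h := atomic.LoadUint32(&q.head) // load-acquire, synchronize with stealers
//		if q.tail-h >= uint32(len(q.buf)) {
//			return false // full: the real runqput spills half the queue to the global queue
//		}
//		q.buf[q.tail%uint32(len(q.buf))] = g
//		atomic.StoreUint32(&q.tail, q.tail+1) // store-release, publish the new slot
//		return true
//	}
//
//	func main() {
//		q := new(localq)
//		_ = q.put(func() {})
//	}
//
// The _p_.runnext slot is handled separately, as described above.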
4801 func runqput(_p_ *p, gp *g, next bool) { 4802 if randomizeScheduler && next && fastrand()%2 == 0 { 4803 next = false 4804 } 4805 4806 if next { 4807 retryNext: 4808 oldnext := _p_.runnext 4809 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 4810 goto retryNext 4811 } 4812 if oldnext == 0 { 4813 return 4814 } 4815 // Kick the old runnext out to the regular run queue. 4816 gp = oldnext.ptr() 4817 } 4818 4819 retry: 4820 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4821 t := _p_.runqtail 4822 if t-h < uint32(len(_p_.runq)) { 4823 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4824 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4825 return 4826 } 4827 if runqputslow(_p_, gp, h, t) { 4828 return 4829 } 4830 // the queue is not full, now the put above must succeed 4831 goto retry 4832 } 4833 4834 // Put g and a batch of work from local runnable queue on global queue. 4835 // Executed only by the owner P. 4836 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4837 var batch [len(_p_.runq)/2 + 1]*g 4838 4839 // First, grab a batch from local queue. 4840 n := t - h 4841 n = n / 2 4842 if n != uint32(len(_p_.runq)/2) { 4843 throw("runqputslow: queue is not full") 4844 } 4845 for i := uint32(0); i < n; i++ { 4846 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 4847 } 4848 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4849 return false 4850 } 4851 batch[n] = gp 4852 4853 if randomizeScheduler { 4854 for i := uint32(1); i <= n; i++ { 4855 j := fastrandn(i + 1) 4856 batch[i], batch[j] = batch[j], batch[i] 4857 } 4858 } 4859 4860 // Link the goroutines. 4861 for i := uint32(0); i < n; i++ { 4862 batch[i].schedlink.set(batch[i+1]) 4863 } 4864 4865 // Now put the batch on global queue. 4866 lock(&sched.lock) 4867 globrunqputbatch(batch[0], batch[n], int32(n+1)) 4868 unlock(&sched.lock) 4869 return true 4870 } 4871 4872 // Get g from local runnable queue. 4873 // If inheritTime is true, gp should inherit the remaining time in the 4874 // current time slice. Otherwise, it should start a new time slice. 4875 // Executed only by the owner P. 4876 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4877 // If there's a runnext, it's the next G to run. 4878 for { 4879 next := _p_.runnext 4880 if next == 0 { 4881 break 4882 } 4883 if _p_.runnext.cas(next, 0) { 4884 return next.ptr(), true 4885 } 4886 } 4887 4888 for { 4889 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4890 t := _p_.runqtail 4891 if t == h { 4892 return nil, false 4893 } 4894 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4895 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4896 return gp, false 4897 } 4898 } 4899 } 4900 4901 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4902 // Batch is a ring buffer starting at batchHead. 4903 // Returns number of grabbed goroutines. 4904 // Can be executed by any P. 4905 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4906 for { 4907 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4908 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4909 n := t - h 4910 n = n - n/2 4911 if n == 0 { 4912 if stealRunNextG { 4913 // Try to steal from _p_.runnext. 4914 if next := _p_.runnext; next != 0 { 4915 if _p_.status == _Prunning { 4916 // @aghosn: If it is enclave, we skip and trash, don't care. 
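// (gotee-specific: when this runtime instance is executing inside the enclave,
// isEnclave is set and the code jumps straight to the enclskip label below,
// bypassing the usleep/osyield backoff that normally gives _p_ a chance to run
// its own runnext before it is stolen.)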
THUG LIFE 4917 if isEnclave { 4918 goto enclskip 4919 } 4920 // Sleep to ensure that _p_ isn't about to run the g 4921 // we are about to steal. 4922 // The important use case here is when the g running 4923 // on _p_ ready()s another g and then almost 4924 // immediately blocks. Instead of stealing runnext 4925 // in this window, back off to give _p_ a chance to 4926 // schedule runnext. This will avoid thrashing gs 4927 // between different Ps. 4928 // A sync chan send/recv takes ~50ns as of time of 4929 // writing, so 3us gives ~50x overshoot. 4930 if GOOS != "windows" { 4931 usleep(3) 4932 } else { 4933 // On windows system timer granularity is 4934 // 1-15ms, which is way too much for this 4935 // optimization. So just yield. 4936 osyield() 4937 } 4938 } 4939 enclskip: 4940 if !_p_.runnext.cas(next, 0) { 4941 continue 4942 } 4943 batch[batchHead%uint32(len(batch))] = next 4944 return 1 4945 } 4946 } 4947 return 0 4948 } 4949 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4950 continue 4951 } 4952 for i := uint32(0); i < n; i++ { 4953 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4954 batch[(batchHead+i)%uint32(len(batch))] = g 4955 } 4956 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4957 return n 4958 } 4959 } 4960 } 4961 4962 // Steal half of elements from local runnable queue of p2 4963 // and put onto local runnable queue of p. 4964 // Returns one of the stolen elements (or nil if failed). 4965 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4966 t := _p_.runqtail 4967 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4968 if n == 0 { 4969 return nil 4970 } 4971 n-- 4972 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4973 if n == 0 { 4974 return gp 4975 } 4976 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4977 if t-h+n >= uint32(len(_p_.runq)) { 4978 throw("runqsteal: runq overflow") 4979 } 4980 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4981 return gp 4982 } 4983 4984 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4985 func setMaxThreads(in int) (out int) { 4986 lock(&sched.lock) 4987 out = int(sched.maxmcount) 4988 if in > 0x7fffffff { // MaxInt32 4989 sched.maxmcount = 0x7fffffff 4990 } else { 4991 sched.maxmcount = int32(in) 4992 } 4993 checkmcount() 4994 unlock(&sched.lock) 4995 return 4996 } 4997 4998 func haveexperiment(name string) bool { 4999 if name == "framepointer" { 5000 return framepointer_enabled // set by linker 5001 } 5002 x := sys.Goexperiment 5003 for x != "" { 5004 xname := "" 5005 i := index(x, ",") 5006 if i < 0 { 5007 xname, x = x, "" 5008 } else { 5009 xname, x = x[:i], x[i+1:] 5010 } 5011 if xname == name { 5012 return true 5013 } 5014 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 5015 return false 5016 } 5017 } 5018 return false 5019 } 5020 5021 //go:nosplit 5022 func procPin() int { 5023 _g_ := getg() 5024 mp := _g_.m 5025 5026 mp.locks++ 5027 return int(mp.p.ptr().id) 5028 } 5029 5030 //go:nosplit 5031 func procUnpin() { 5032 _g_ := getg() 5033 _g_.m.locks-- 5034 } 5035 5036 //go:linkname sync_runtime_procPin sync.runtime_procPin 5037 //go:nosplit 5038 func sync_runtime_procPin() int { 5039 return procPin() 5040 } 5041 5042 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 5043 //go:nosplit 5044 func sync_runtime_procUnpin() { 5045 procUnpin() 5046 } 5047 5048 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 5049 //go:nosplit 5050 func sync_atomic_runtime_procPin() int { 5051 
return procPin() 5052 } 5053 5054 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 5055 //go:nosplit 5056 func sync_atomic_runtime_procUnpin() { 5057 procUnpin() 5058 } 5059 5060 // Active spinning for sync.Mutex. 5061 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 5062 //go:nosplit 5063 func sync_runtime_canSpin(i int) bool { 5064 // sync.Mutex is cooperative, so we are conservative with spinning. 5065 // Spin only few times and only if running on a multicore machine and 5066 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 5067 // As opposed to runtime mutex we don't do passive spinning here, 5068 // because there can be work on global runq on on other Ps. 5069 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 5070 return false 5071 } 5072 if p := getg().m.p.ptr(); !runqempty(p) { 5073 return false 5074 } 5075 return true 5076 } 5077 5078 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 5079 //go:nosplit 5080 func sync_runtime_doSpin() { 5081 procyield(active_spin_cnt) 5082 } 5083 5084 var stealOrder randomOrder 5085 5086 // randomOrder/randomEnum are helper types for randomized work stealing. 5087 // They allow to enumerate all Ps in different pseudo-random orders without repetitions. 5088 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS 5089 // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration. 5090 type randomOrder struct { 5091 count uint32 5092 coprimes []uint32 5093 } 5094 5095 type randomEnum struct { 5096 i uint32 5097 count uint32 5098 pos uint32 5099 inc uint32 5100 } 5101 5102 func (ord *randomOrder) reset(count uint32) { 5103 ord.count = count 5104 ord.coprimes = ord.coprimes[:0] 5105 for i := uint32(1); i <= count; i++ { 5106 if gcd(i, count) == 1 { 5107 ord.coprimes = append(ord.coprimes, i) 5108 } 5109 } 5110 } 5111 5112 func (ord *randomOrder) start(i uint32) randomEnum { 5113 return randomEnum{ 5114 count: ord.count, 5115 pos: i % ord.count, 5116 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 5117 } 5118 } 5119 5120 func (enum *randomEnum) done() bool { 5121 return enum.i == enum.count 5122 } 5123 5124 func (enum *randomEnum) next() { 5125 enum.i++ 5126 enum.pos = (enum.pos + enum.inc) % enum.count 5127 } 5128 5129 func (enum *randomEnum) position() uint32 { 5130 return enum.pos 5131 } 5132 5133 func gcd(a, b uint32) uint32 { 5134 for b != 0 { 5135 a, b = b, a%b 5136 } 5137 return a 5138 }
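// The coprime-stepping enumeration implemented by randomOrder/randomEnum above
// can be reproduced with a small standalone program (not part of this file; a
// minimal sketch assuming count=6 Ps and the same gcd-based coprime selection):
//
//	package main
//
//	import "fmt"
//
//	func gcd(a, b uint32) uint32 {
//		for b != 0 {
//			a, b = b, a%b
//		}
//		return a
//	}
//
//	func main() {
//		const count = 6
//		var coprimes []uint32
//		for i := uint32(1); i <= count; i++ {
//			if gcd(i, count) == 1 {
//				coprimes = append(coprimes, i) // 1 and 5 for count=6
//			}
//		}
//		for _, inc := range coprimes {
//			pos := uint32(2) // arbitrary starting P, as in ord.start(i)
//			var order []uint32
//			for n := 0; n < count; n++ {
//				order = append(order, pos)
//				pos = (pos + inc) % count
//			}
//			fmt.Println(inc, order) // each pass visits all count Ps exactly once
//		}
//	}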