// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in the near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and we would need to park it.
//    It would also destroy locality of computation, since we want to keep
//    dependent goroutines on the same thread, and it would introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if (1) there is an
// idle P and (2) there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue or
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to the local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to the global run queue, as we are not
// sloppy about thread unparking when submitting to the global queue. Also see comments
// for nmspinning manipulation.
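// As an illustrative aside (not runtime code): the unpark condition above can
// be sketched in ordinary Go with sync/atomic. The names queue, nspinning, and
// park below are invented for the example; the real scheduler uses per-P run
// queues and OS-level notes rather than channels.
//
//	package main
//
//	import (
//		"sync/atomic"
//		"time"
//	)
//
//	var (
//		queue     = make(chan int, 64) // stand-in for the run queues
//		nspinning int32                // stand-in for sched.nmspinning
//		park      = make(chan struct{}, 1)
//	)
//
//	// ready submits work and wakes a worker only if no one is spinning.
//	func ready(v int) {
//		queue <- v // submit work
//		if atomic.LoadInt32(&nspinning) == 0 {
//			select {
//			case park <- struct{}{}: // unpark one worker
//			default:
//			}
//		}
//	}
//
//	// worker spins briefly for work, then re-checks after leaving the
//	// spinning state (the spinning->non-spinning transition described
//	// above), and only then parks.
//	func worker() {
//		for {
//			atomic.AddInt32(&nspinning, 1)
//			select {
//			case v := <-queue:
//				atomic.AddInt32(&nspinning, -1)
//				_ = v // run the work
//				continue
//			default:
//			}
//			atomic.AddInt32(&nspinning, -1)
//			select {
//			case v := <-queue: // re-check for work submitted during the transition
//				_ = v
//			case <-park:
//			}
//		}
//	}
//
//	func main() {
//		go worker()
//		go worker()
//		for i := 0; i < 10; i++ {
//			ready(i)
//		}
//		time.Sleep(100 * time.Millisecond)
//	}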
var (
	m0           m
	g0           g
	raceprocctx0 uintptr
)

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		systemstack(func() {
			newm(sysmon, nil)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started.
	runtimeInitTime = nanotime()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}
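// As the comment in main above notes, a program can keep main.main on the
// main OS thread by taking the thread lock during package initialization.
// A minimal user-level sketch (ordinary Go, not runtime code):
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// Taken before main.main runs and never released, so main.main
//		// stays on the main OS thread (needed by some C GUI libraries).
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// ... calls that must happen on the main thread ...
//	}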
// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
//go:nosplit
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}

// goschedguarded yields the processor like Gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
// Reason explains why the goroutine has been parked.
// It is displayed in stack traces and heap dumps.
// Reasons should be unique and descriptive.
// Do not re-use reasons; add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
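// gopark/goready are runtime-internal, but the shape of a blocking primitive
// built on them can be mimicked in ordinary Go with a one-slot channel. A
// rough sketch (the parker type and its methods are invented for the example):
//
//	package main
//
//	type parker struct{ ch chan struct{} }
//
//	func newParker() *parker { return &parker{ch: make(chan struct{}, 1)} }
//
//	// park blocks the calling goroutine, as gopark does, until ready is called.
//	func (p *parker) park() { <-p.ch }
//
//	// ready makes a parked goroutine runnable again, like goready(gp).
//	func (p *parker) ready() {
//		select {
//		case p.ch <- struct{}{}:
//		default: // already readied
//		}
//	}
//
//	func main() {
//		p := newParker()
//		done := make(chan struct{})
//		go func() { p.park(); close(done) }()
//		p.ready()
//		<-done
//	}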
//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from the central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of the local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
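// The acquireSudog/releaseSudog pair above implements a classic two-level
// free list: a per-P cache refilled from (and spilled to) a locked central
// list, so the lock is only touched on batch moves. The same structure in
// ordinary Go, with invented names (localCap, central, local):
//
//	package main
//
//	import "sync"
//
//	const localCap = 16
//
//	var (
//		centralMu sync.Mutex
//		central   []*int // stand-in for sched.sudogcache
//		local     []*int // stand-in for one P's sudogcache
//	)
//
//	func acquire() *int {
//		if len(local) == 0 {
//			centralMu.Lock()
//			// Grab a batch of up to half the local capacity.
//			for len(local) < localCap/2 && len(central) > 0 {
//				local = append(local, central[len(central)-1])
//				central = central[:len(central)-1]
//			}
//			centralMu.Unlock()
//			if len(local) == 0 {
//				local = append(local, new(int)) // central empty: allocate
//			}
//		}
//		s := local[len(local)-1]
//		local = local[:len(local)-1]
//		return s
//	}
//
//	func release(s *int) {
//		if len(local) == localCap {
//			centralMu.Lock()
//			// Spill half of the local cache to the central list.
//			for len(local) > localCap/2 {
//				central = append(central, local[len(local)-1])
//				local = local[:len(local)-1]
//			}
//			centralMu.Unlock()
//		}
//		local = append(local, s)
//	}
//
//	func main() { release(acquire()) }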
// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	sp := stringStructOf(&badmorestackg0Msg)
	write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	sp := stringStructOf(&badmorestackgsignalMsg)
	write(2, sp.str, int32(sp.len))
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// cpuinit extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and calls internal/cpu.Initialize.
func cpuinit() {
	const prefix = "GODEBUG="
	var env string

	switch GOOS {
	case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "solaris", "linux":
		cpu.DebugOptions = true

		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))

			if hasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}

	cpu.Initialize(env)

	// These cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that cannot be assumed to be always supported.
	x86HasPOPCNT = cpu.X86.HasPOPCNT
	x86HasSSE41 = cpu.X86.HasSSE41

	arm64HasATOMICS = cpu.ARM64.HasATOMICS
}

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to the race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)
	cpuinit()       // must run before alginit
	alginit()       // maps must not be used before this call
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules

	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes. We can't do this until after
	// procresize because the write barrier needs a P.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
		for _, p := range allp {
			p.wbBuf.reset()
		}
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}
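// schedinit above is where GOMAXPROCS takes effect at startup: procs defaults
// to ncpu and is overridden by a positive GOMAXPROCS environment variable
// before procresize runs. From user code the same knob is visible through the
// runtime API. A small demonstration (ordinary Go):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// GOMAXPROCS(0) only queries the current setting, which schedinit
//		// derived from ncpu or the GOMAXPROCS environment variable.
//		fmt.Println("NumCPU:", runtime.NumCPU())
//		fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
//	}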
func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if mcount() > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for the user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)
	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	mp.id = sched.mnext
	sched.mnext++
	checkmcount()

	mp.fastrand[0] = 1597334677 * uint32(mp.id)
	mp.fastrand[1] = uint32(cputicks())
	if mp.fastrand[0]|mp.fastrand[1] == 0 {
		mp.fastrand[1] = 1
	}

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing uint32

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation; it is used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	atomic.Store(&freezing, 1)
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus,
// castogscanstatus, and casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gcscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gcscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gcscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gcscanvalid.
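// The _Gscan bit described above acts as a lock: setting it via CAS claims
// the status word, clearing it releases it. The pattern in miniature, with
// invented names (statusLocked, tryLock, unlock), using ordinary Go atomics:
//
//	package main
//
//	import "sync/atomic"
//
//	const statusLocked = 0x1000 // stand-in for the _Gscan bit
//
//	type obj struct{ status uint32 }
//
//	// tryLock claims the status word by setting the lock bit, like castogscanstatus.
//	func (o *obj) tryLock(expect uint32) bool {
//		return atomic.CompareAndSwapUint32(&o.status, expect, expect|statusLocked)
//	}
//
//	// unlock clears the lock bit, like casfrom_Gscanstatus.
//	func (o *obj) unlock(expect uint32) {
//		if !atomic.CompareAndSwapUint32(&o.status, expect|statusLocked, expect) {
//			panic("status changed while locked")
//		}
//	}
//
//	func main() {
//		o := &obj{status: 2}
//		if o.tryLock(2) {
//			// ... inspect state that the lock bit protects ...
//			o.unlock(2)
//		}
//	}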
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while casfrom_Gscanstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		systemstack(func() {
			print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
			throw("casgstatus")
		})
	}

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state, giving
	// GC time to finish and change the state to oldval.
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			throw("casgstatus: waiting for Gwaiting but is Grunnable")
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
		// But meanwhile just yield.
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casgcopystack performs casgstatus(gp, oldstatus, _Gcopystack), assuming oldstatus
// is Gwaiting or Grunnable, and returns the old status. It cannot call casgstatus
// directly, because we are racing with an async wakeup that might come in from
// netpoll. If we see Gwaiting from the readgstatus, it might have become Grunnable
// by the time we get to the cas. If we called casgstatus, it would loop waiting
// for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g, gcw *gcWork) {
	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.

	gp.gcscandone = false

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 10 * 1000
	var nextYield int64

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
loop:
	for i := 0; !gp.gcscandone; i++ {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true
			break loop

		case _Gcopystack:
			// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					scanstack(gp, gcw)
					gp.gcscandone = true
				}
				restartg(gp)
				break loop
			}

		case _Gscanwaiting:
			// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}

		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			procyield(10)
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}
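// Both casgstatus and scang above use the same backoff shape: spin briefly
// with cheap pauses, and past a deadline fall back to yielding the OS thread.
// Outside the runtime the analogous pattern looks like this (runtime.Gosched
// standing in for procyield/osyield; the names and constants are illustrative):
//
//	package main
//
//	import (
//		"runtime"
//		"sync/atomic"
//		"time"
//	)
//
//	func waitFor(flag *uint32) {
//		const yieldDelay = 5 * time.Microsecond
//		deadline := time.Now().Add(yieldDelay)
//		for atomic.LoadUint32(flag) == 0 {
//			if time.Now().Before(deadline) {
//				continue // cheap spin, like procyield(1)
//			}
//			runtime.Gosched() // give up the thread, like osyield()
//			deadline = time.Now().Add(yieldDelay / 2)
//		}
//	}
//
//	func main() {
//		var flag uint32
//		go func() { time.Sleep(time.Millisecond); atomic.StoreUint32(&flag, 1) }()
//		waitFor(&flag)
//	}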
// The GC requests that this routine be moved from a scanmumble state to a mumble state.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
		// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points, and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(func() { startTheWorldWithSema(false) })
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	// sanity checks
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, p := range allp {
			if p.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
		}
	}
	if atomic.Load(&freezing) != 0 {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}
}

func startTheWorldWithSema(emitTraceEvent bool) int64 {
	_g_ := getg()

	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list := netpoll(false) // non-blocking
		injectglist(&list)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	startTime := nanotime()
	if emitTraceEvent {
		traceGCSTWDone()
	}

	// Wake up an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return startTime
}
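// The newprocs handoff above is how a runtime.GOMAXPROCS call takes effect:
// the API stops the world, records the new count in newprocs, and
// startTheWorldWithSema then passes it to procresize. From user code this is
// simply (ordinary Go):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		prev := runtime.GOMAXPROCS(2) // stops and restarts the world internally
//		fmt.Println("was", prev, "now", runtime.GOMAXPROCS(0))
//	}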
// Called to start an M.
//
// This must not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
func mstart() {
	_g_ := getg()

	osStack := _g_.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()

	// Exit this thread.
	if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" || GOOS == "darwin" || GOOS == "aix" {
		// Windows, Solaris, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in _g_.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record the caller for use as the top of stack in mcall and
	// for terminating the thread.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	save(getcallerpc(), getcallersp())
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		mstartm0()
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback. See issue #6751 for details.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}

// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	g := getg()
	m := g.m

	if m == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		notesleep(&m.park)
		throw("locked m0 woke up")
	}

	sigblock()
	unminit()

	// Free the gsignal stack.
	if m.gsignal != nil {
		stackfree(m.gsignal.stack)
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == m {
			*pprev = m.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	if !osStack {
		// Delay reaping m until it's done with the stack.
		//
		// If this is using an OS stack, the OS will free it
		// so there's no need for reaping.
		atomic.Store(&m.freeWait, 1)
		// Put m on the free list, though it will not be reaped until
		// freeWait is 0. Note that the free list must not be linked
		// through alllink because some functions walk allm without
		// locking, so may be using alllink.
		m.freelink = sched.freem
		sched.freem = m
	}
	unlock(&sched.lock)

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if osStack {
		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&m.freeWait)
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p := range allp {
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition into _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P runs the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}
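// A ragged barrier of the forEachP kind, which runs fn against every worker
// at its next safe point without stopping them all at once, can be sketched
// with channels. The request/ack plumbing below is invented for the example;
// the runtime uses per-P flags, preemption, and notes instead:
//
//	package main
//
//	import "fmt"
//
//	type worker struct{ req chan func(id int) }
//
//	// loop processes work and, between work items (its "safe points"),
//	// runs any pending barrier function.
//	func (w *worker) loop(id int, done chan<- int) {
//		for fn := range w.req {
//			fn(id) // reached a safe point: run the barrier function
//			done <- id
//		}
//	}
//
//	func main() {
//		done := make(chan int)
//		ws := make([]*worker, 3)
//		for i := range ws {
//			ws[i] = &worker{req: make(chan func(int), 1)}
//			go ws[i].loop(i, done)
//		}
//		// Ask every worker to run fn at its next safe point, then wait.
//		fn := func(id int) { fmt.Println("barrier on worker", id) }
//		for _, w := range ws {
//			w.req <- fn
//		}
//		for range ws {
//			<-done
//		}
//	}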
// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows _p_.
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			if freem.freeWait != 0 {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			stackfree(freem.g0.stack)
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris or Darwin, pthread_create will make us a stack.
	// Windows and Plan 9 will lay out the sched stack on the OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via Casuintptr) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can also happen on Windows if a global ctor uses a
		// callback created by syscall.NewCallback. See issue #6751
		// for details.
		//
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	extraMCount--
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	atomic.Xadd(&sched.ngsys, -1)
}
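// The needm path above is what a cgo callback from a C-created thread rides
// in on. A minimal cgo program that exercises it is sketched below: each
// callback from the pthread goes through needm and, on return, dropm. Note
// that because the file uses //export, cgo actually requires the C function
// definitions to live in a separate .c file; they are shown inline only to
// keep the sketch short.
//
//	package main
//
//	/*
//	#include <pthread.h>
//	extern void GoCallback();
//	static void* run(void* arg) { GoCallback(); return 0; }
//	static void startThread() {
//		pthread_t t;
//		pthread_create(&t, 0, run, 0);
//		pthread_join(t, 0);
//	}
//	*/
//	import "C"
//
//	import "fmt"
//
//	//export GoCallback
//	func GoCallback() {
//		fmt.Println("Go code on a C-created thread") // running on a borrowed extra M
//	}
//
//	func main() {
//		C.startThread()
//	}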
var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	gp.gcscanvalid = true
	gp.gcscandone = true
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	atomic.Xadd(&sched.ngsys, +1)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	extraMCount++
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	atomic.Xadd(&sched.ngsys, +1)

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock()
	unminit()

	mnext := lockextra(true)
	extraMCount++
	mp.schedlink.set(mnext)

	setg(nil)

	// Commit the release of mp.
	unlockextra(mp)

	msigrestore(sigmask)
}

// A helper function for EnsureDropM.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var extram uintptr
var extraMCount uint32 // Protected by lockextra
var extraMWaiters uint32

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := atomic.Loaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				atomic.Xadd(&extraMWaiters, 1)
				incr = true
			}
			usleep(1)
			continue
		}
		if atomic.Casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
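// The lockextra scheme above, which swings the list head to a sentinel value
// to lock the whole list and stores a real head to unlock, works without any
// m or scheduler support and avoids the ABA problem of a plain CAS pop. In
// miniature, with ordinary Go atomics and invented names (node, sentinel):
//
//	package main
//
//	import (
//		"sync/atomic"
//		"unsafe"
//	)
//
//	type node struct{ next *node }
//
//	var sentinel node        // its address is the "locked" marker, like MLOCKED
//	var head unsafe.Pointer  // atomically holds a *node, nil, or &sentinel
//
//	// lockList spins until it has swapped the head for the sentinel,
//	// then owns the whole list, like lockextra.
//	func lockList() *node {
//		for {
//			old := atomic.LoadPointer(&head)
//			if old == unsafe.Pointer(&sentinel) {
//				continue // someone else holds the list; spin
//			}
//			if atomic.CompareAndSwapPointer(&head, old, unsafe.Pointer(&sentinel)) {
//				return (*node)(old)
//			}
//		}
//	}
//
//	// unlockList publishes a valid head again, like unlockextra.
//	func unlockList(n *node) {
//		atomic.StorePointer(&head, unsafe.Pointer(n))
//	}
//
//	func main() {
//		unlockList(&node{}) // seed the list with one node
//		n := lockList()     // pop everything under the "lock"
//		unlockList(n.next)  // push back the rest
//	}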
If nilokay is true, then lockextra will 1743 // return a nil list head if that's what it finds. If nilokay is false, 1744 // lockextra will keep waiting until the list head is no longer nil. 1745 //go:nosplit 1746 func lockextra(nilokay bool) *m { 1747 const locked = 1 1748 1749 incr := false 1750 for { 1751 old := atomic.Loaduintptr(&extram) 1752 if old == locked { 1753 yield := osyield 1754 yield() 1755 continue 1756 } 1757 if old == 0 && !nilokay { 1758 if !incr { 1759 // Add 1 to the number of threads 1760 // waiting for an M. 1761 // This is cleared by newextram. 1762 atomic.Xadd(&extraMWaiters, 1) 1763 incr = true 1764 } 1765 usleep(1) 1766 continue 1767 } 1768 if atomic.Casuintptr(&extram, old, locked) { 1769 return (*m)(unsafe.Pointer(old)) 1770 } 1771 yield := osyield 1772 yield() 1773 continue 1774 } 1775 } 1776 1777 //go:nosplit 1778 func unlockextra(mp *m) { 1779 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1780 } 1781 1782 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 1783 // around exec'ing while creating/destroying threads. See issue #19546. 1784 var execLock rwmutex 1785 1786 // newmHandoff contains a list of m structures that need new OS threads. 1787 // This is used by newm in situations where newm itself can't safely 1788 // start an OS thread. 1789 var newmHandoff struct { 1790 lock mutex 1791 1792 // newm points to a list of M structures that need new OS 1793 // threads. The list is linked through m.schedlink. 1794 newm muintptr 1795 1796 // waiting indicates that wake needs to be notified when an m 1797 // is put on the list. 1798 waiting bool 1799 wake note 1800 1801 // haveTemplateThread indicates that the templateThread has 1802 // been started. This is not protected by lock. Use cas to set 1803 // to 1. 1804 haveTemplateThread uint32 1805 } 1806 1807 // Create a new m. It will start off with a call to fn, or else the scheduler. 1808 // fn needs to be static and not a heap allocated closure. 1809 // May run with m.p==nil, so write barriers are not allowed. 1810 //go:nowritebarrierrec 1811 func newm(fn func(), _p_ *p) { 1812 mp := allocm(_p_, fn) 1813 mp.nextp.set(_p_) 1814 mp.sigmask = initSigmask 1815 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { 1816 // We're on a locked M or a thread that may have been 1817 // started by C. The kernel state of this thread may 1818 // be strange (the user may have locked it for that 1819 // purpose). We don't want to clone that into another 1820 // thread. Instead, ask a known-good thread to create 1821 // the thread for us. 1822 // 1823 // This is disabled on Plan 9. See golang.org/issue/22227. 1824 // 1825 // TODO: This may be unnecessary on Windows, which 1826 // doesn't model thread creation off fork. 
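// A user-level situation that reaches this handoff (a sketch; the helper
// name doWork is hypothetical):
//
//	runtime.LockOSThread()
//	// ... alter per-thread kernel state: signal mask, priority, etc. ...
//	go doWork() // may need a new M, which must not be cloned from here
//
// Cloning from this thread could copy its unusual kernel state into the new
// thread, so instead the request is queued for the template thread: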
1827 lock(&newmHandoff.lock) 1828 if newmHandoff.haveTemplateThread == 0 { 1829 throw("on a locked thread with no template thread") 1830 } 1831 mp.schedlink = newmHandoff.newm 1832 newmHandoff.newm.set(mp) 1833 if newmHandoff.waiting { 1834 newmHandoff.waiting = false 1835 notewakeup(&newmHandoff.wake) 1836 } 1837 unlock(&newmHandoff.lock) 1838 return 1839 } 1840 newm1(mp) 1841 } 1842 1843 func newm1(mp *m) { 1844 if iscgo { 1845 var ts cgothreadstart 1846 if _cgo_thread_start == nil { 1847 throw("_cgo_thread_start missing") 1848 } 1849 ts.g.set(mp.g0) 1850 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1851 ts.fn = unsafe.Pointer(funcPC(mstart)) 1852 if msanenabled { 1853 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1854 } 1855 execLock.rlock() // Prevent process clone. 1856 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1857 execLock.runlock() 1858 return 1859 } 1860 execLock.rlock() // Prevent process clone. 1861 newosproc(mp) 1862 execLock.runlock() 1863 } 1864 1865 // startTemplateThread starts the template thread if it is not already 1866 // running. 1867 // 1868 // The calling thread must itself be in a known-good state. 1869 func startTemplateThread() { 1870 if GOARCH == "wasm" { // no threads on wasm yet 1871 return 1872 } 1873 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { 1874 return 1875 } 1876 newm(templateThread, nil) 1877 } 1878 1879 // templateThread is a thread in a known-good state that exists solely 1880 // to start new threads in known-good states when the calling thread 1881 // may not be in a good state. 1882 // 1883 // Many programs never need this, so templateThread is started lazily 1884 // when we first enter a state that might lead to running on a thread 1885 // in an unknown state. 1886 // 1887 // templateThread runs on an M without a P, so it must not have write 1888 // barriers. 1889 // 1890 //go:nowritebarrierrec 1891 func templateThread() { 1892 lock(&sched.lock) 1893 sched.nmsys++ 1894 checkdead() 1895 unlock(&sched.lock) 1896 1897 for { 1898 lock(&newmHandoff.lock) 1899 for newmHandoff.newm != 0 { 1900 newm := newmHandoff.newm.ptr() 1901 newmHandoff.newm = 0 1902 unlock(&newmHandoff.lock) 1903 for newm != nil { 1904 next := newm.schedlink.ptr() 1905 newm.schedlink = 0 1906 newm1(newm) 1907 newm = next 1908 } 1909 lock(&newmHandoff.lock) 1910 } 1911 newmHandoff.waiting = true 1912 noteclear(&newmHandoff.wake) 1913 unlock(&newmHandoff.lock) 1914 notesleep(&newmHandoff.wake) 1915 } 1916 } 1917 1918 // Stops execution of the current m until new work is available. 1919 // Returns with acquired P. 1920 func stopm() { 1921 _g_ := getg() 1922 1923 if _g_.m.locks != 0 { 1924 throw("stopm holding locks") 1925 } 1926 if _g_.m.p != 0 { 1927 throw("stopm holding p") 1928 } 1929 if _g_.m.spinning { 1930 throw("stopm spinning") 1931 } 1932 1933 lock(&sched.lock) 1934 mput(_g_.m) 1935 unlock(&sched.lock) 1936 notesleep(&_g_.m.park) 1937 noteclear(&_g_.m.park) 1938 acquirep(_g_.m.nextp.ptr()) 1939 _g_.m.nextp = 0 1940 } 1941 1942 func mspinning() { 1943 // startm's caller incremented nmspinning. Set the new M's spinning. 1944 getg().m.spinning = true 1945 } 1946 1947 // Schedules some M to run the p (creates an M if necessary). 1948 // If p==nil, tries to get an idle P, if no idle P's does nothing. 1949 // May run with m.p==nil, so write barriers are not allowed. 1950 // If spinning is set, the caller has incremented nmspinning and startm will 1951 // either decrement nmspinning or set m.spinning in the newly started M. 
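// The caller-side half of the contract (sketch): to request a spinning M,
// a caller does
//
//	if atomic.Cas(&sched.nmspinning, 0, 1) {
//		startm(nil, true) // startm consumes the increment
//	}
//
// exactly as wakep below does; if startm then finds no idle P, it undoes
// the increment itself, as the code shows.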
1952 //go:nowritebarrierrec 1953 func startm(_p_ *p, spinning bool) { 1954 lock(&sched.lock) 1955 if _p_ == nil { 1956 _p_ = pidleget() 1957 if _p_ == nil { 1958 unlock(&sched.lock) 1959 if spinning { 1960 // The caller incremented nmspinning, but there are no idle Ps, 1961 // so it's okay to just undo the increment and give up. 1962 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1963 throw("startm: negative nmspinning") 1964 } 1965 } 1966 return 1967 } 1968 } 1969 mp := mget() 1970 unlock(&sched.lock) 1971 if mp == nil { 1972 var fn func() 1973 if spinning { 1974 // The caller incremented nmspinning, so set m.spinning in the new M. 1975 fn = mspinning 1976 } 1977 newm(fn, _p_) 1978 return 1979 } 1980 if mp.spinning { 1981 throw("startm: m is spinning") 1982 } 1983 if mp.nextp != 0 { 1984 throw("startm: m has p") 1985 } 1986 if spinning && !runqempty(_p_) { 1987 throw("startm: p has runnable gs") 1988 } 1989 // The caller incremented nmspinning, so set m.spinning in the new M. 1990 mp.spinning = spinning 1991 mp.nextp.set(_p_) 1992 notewakeup(&mp.park) 1993 } 1994 1995 // Hands off P from syscall or locked M. 1996 // Always runs without a P, so write barriers are not allowed. 1997 //go:nowritebarrierrec 1998 func handoffp(_p_ *p) { 1999 // handoffp must start an M in any situation where 2000 // findrunnable would return a G to run on _p_. 2001 2002 // if it has local work, start it straight away 2003 if !runqempty(_p_) || sched.runqsize != 0 { 2004 startm(_p_, false) 2005 return 2006 } 2007 // if it has GC work, start it straight away 2008 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2009 startm(_p_, false) 2010 return 2011 } 2012 // no local work, check that there are no spinning/idle M's, 2013 // otherwise our help is not required 2014 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 2015 startm(_p_, true) 2016 return 2017 } 2018 lock(&sched.lock) 2019 if sched.gcwaiting != 0 { 2020 _p_.status = _Pgcstop 2021 sched.stopwait-- 2022 if sched.stopwait == 0 { 2023 notewakeup(&sched.stopnote) 2024 } 2025 unlock(&sched.lock) 2026 return 2027 } 2028 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 2029 sched.safePointFn(_p_) 2030 sched.safePointWait-- 2031 if sched.safePointWait == 0 { 2032 notewakeup(&sched.safePointNote) 2033 } 2034 } 2035 if sched.runqsize != 0 { 2036 unlock(&sched.lock) 2037 startm(_p_, false) 2038 return 2039 } 2040 // If this is the last running P and nobody is polling network, 2041 // need to wakeup another M to poll network. 2042 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 2043 unlock(&sched.lock) 2044 startm(_p_, false) 2045 return 2046 } 2047 pidleput(_p_) 2048 unlock(&sched.lock) 2049 } 2050 2051 // Tries to add one more P to execute G's. 2052 // Called when a G is made runnable (newproc, ready). 2053 func wakep() { 2054 // be conservative about spinning threads 2055 if !atomic.Cas(&sched.nmspinning, 0, 1) { 2056 return 2057 } 2058 startm(nil, true) 2059 } 2060 2061 // Stops execution of the current m that is locked to a g until the g is runnable again. 2062 // Returns with acquired P. 2063 func stoplockedm() { 2064 _g_ := getg() 2065 2066 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 2067 throw("stoplockedm: inconsistent locking") 2068 } 2069 if _g_.m.p != 0 { 2070 // Schedule another M to run this p. 
2071 _p_ := releasep() 2072 handoffp(_p_) 2073 } 2074 incidlelocked(1) 2075 // Wait until another thread schedules lockedg again. 2076 notesleep(&_g_.m.park) 2077 noteclear(&_g_.m.park) 2078 status := readgstatus(_g_.m.lockedg.ptr()) 2079 if status&^_Gscan != _Grunnable { 2080 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 2081 dumpgstatus(_g_) 2082 throw("stoplockedm: not runnable") 2083 } 2084 acquirep(_g_.m.nextp.ptr()) 2085 _g_.m.nextp = 0 2086 } 2087 2088 // Schedules the locked m to run the locked gp. 2089 // May run during STW, so write barriers are not allowed. 2090 //go:nowritebarrierrec 2091 func startlockedm(gp *g) { 2092 _g_ := getg() 2093 2094 mp := gp.lockedm.ptr() 2095 if mp == _g_.m { 2096 throw("startlockedm: locked to me") 2097 } 2098 if mp.nextp != 0 { 2099 throw("startlockedm: m has p") 2100 } 2101 // directly handoff current P to the locked m 2102 incidlelocked(-1) 2103 _p_ := releasep() 2104 mp.nextp.set(_p_) 2105 notewakeup(&mp.park) 2106 stopm() 2107 } 2108 2109 // Stops the current m for stopTheWorld. 2110 // Returns when the world is restarted. 2111 func gcstopm() { 2112 _g_ := getg() 2113 2114 if sched.gcwaiting == 0 { 2115 throw("gcstopm: not waiting for gc") 2116 } 2117 if _g_.m.spinning { 2118 _g_.m.spinning = false 2119 // OK to just drop nmspinning here, 2120 // startTheWorld will unpark threads as necessary. 2121 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2122 throw("gcstopm: negative nmspinning") 2123 } 2124 } 2125 _p_ := releasep() 2126 lock(&sched.lock) 2127 _p_.status = _Pgcstop 2128 sched.stopwait-- 2129 if sched.stopwait == 0 { 2130 notewakeup(&sched.stopnote) 2131 } 2132 unlock(&sched.lock) 2133 stopm() 2134 } 2135 2136 // Schedules gp to run on the current M. 2137 // If inheritTime is true, gp inherits the remaining time in the 2138 // current time slice. Otherwise, it starts a new time slice. 2139 // Never returns. 2140 // 2141 // Write barriers are allowed because this is called immediately after 2142 // acquiring a P in several places. 2143 // 2144 //go:yeswritebarrierrec 2145 func execute(gp *g, inheritTime bool) { 2146 _g_ := getg() 2147 2148 casgstatus(gp, _Grunnable, _Grunning) 2149 gp.waitsince = 0 2150 gp.preempt = false 2151 gp.stackguard0 = gp.stack.lo + _StackGuard 2152 if !inheritTime { 2153 _g_.m.p.ptr().schedtick++ 2154 } 2155 _g_.m.curg = gp 2156 gp.m = _g_.m 2157 2158 // Check whether the profiler needs to be turned on or off. 2159 hz := sched.profilehz 2160 if _g_.m.profilehz != hz { 2161 setThreadCPUProfiler(hz) 2162 } 2163 2164 if trace.enabled { 2165 // GoSysExit has to happen when we have a P, but before GoStart. 2166 // So we emit it here. 2167 if gp.syscallsp != 0 && gp.sysblocktraced { 2168 traceGoSysExit(gp.sysexitticks) 2169 } 2170 traceGoStart() 2171 } 2172 2173 gogo(&gp.sched) 2174 } 2175 2176 // Finds a runnable goroutine to execute. 2177 // Tries to steal from other P's, get g from global queue, poll network. 2178 func findrunnable() (gp *g, inheritTime bool) { 2179 _g_ := getg() 2180 2181 // The conditions here and in handoffp must agree: if 2182 // findrunnable would return a G to run, handoffp must start 2183 // an M. 
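// In outline, the search below proceeds from the cheapest to the most
// expensive source of work (a sketch of the control flow, not extra logic):
//
//	local runq -> global runq -> netpoll (non-blocking) ->
//	steal from other Ps -> idle-priority GC mark work ->
//	re-check everything -> netpoll (blocking) -> stopm
//
// The re-checks after dropping the spinning state and the P are what close
// the race described in the "Worker thread parking/unparking" comment.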
2184 
2185 top:
2186 	_p_ := _g_.m.p.ptr()
2187 	if sched.gcwaiting != 0 {
2188 		gcstopm()
2189 		goto top
2190 	}
2191 	if _p_.runSafePointFn != 0 {
2192 		runSafePointFn()
2193 	}
2194 	if fingwait && fingwake {
2195 		if gp := wakefing(); gp != nil {
2196 			ready(gp, 0, true)
2197 		}
2198 	}
2199 	if *cgo_yield != nil {
2200 		asmcgocall(*cgo_yield, nil)
2201 	}
2202 
2203 	// local runq
2204 	if gp, inheritTime := runqget(_p_); gp != nil {
2205 		return gp, inheritTime
2206 	}
2207 
2208 	// global runq
2209 	if sched.runqsize != 0 {
2210 		lock(&sched.lock)
2211 		gp := globrunqget(_p_, 0)
2212 		unlock(&sched.lock)
2213 		if gp != nil {
2214 			return gp, false
2215 		}
2216 	}
2217 
2218 	// Poll network.
2219 	// This netpoll is only an optimization before we resort to stealing.
2220 	// We can safely skip it if there are no waiters or a thread is blocked
2221 	// in netpoll already. If there is any kind of logical race with that
2222 	// blocked thread (e.g. it has already returned from netpoll, but has
2223 	// not set lastpoll yet), this thread will do blocking netpoll below
2224 	// anyway.
2225 	if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2226 		if list := netpoll(false); !list.empty() { // non-blocking
2227 			gp := list.pop()
2228 			injectglist(&list)
2229 			casgstatus(gp, _Gwaiting, _Grunnable)
2230 			if trace.enabled {
2231 				traceGoUnpark(gp, 0)
2232 			}
2233 			return gp, false
2234 		}
2235 	}
2236 
2237 	// Steal work from other P's.
2238 	procs := uint32(gomaxprocs)
2239 	if atomic.Load(&sched.npidle) == procs-1 {
2240 		// Either GOMAXPROCS=1 or everybody, except for us, is idle already.
2241 		// New work can appear from a returning syscall/cgocall, network or timers.
2242 		// None of those submits to local run queues, so there is no point in stealing.
2243 		goto stop
2244 	}
2245 	// If number of spinning M's >= number of busy P's, block.
2246 	// This is necessary to prevent excessive CPU consumption
2247 	// when GOMAXPROCS>>1 but the program parallelism is low.
2248 	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2249 		goto stop
2250 	}
2251 	if !_g_.m.spinning {
2252 		_g_.m.spinning = true
2253 		atomic.Xadd(&sched.nmspinning, 1)
2254 	}
2255 	for i := 0; i < 4; i++ {
2256 		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2257 			if sched.gcwaiting != 0 {
2258 				goto top
2259 			}
2260 			stealRunNextG := i > 2 // first look for ready queues with more than 1 g
2261 			if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2262 				return gp, false
2263 			}
2264 		}
2265 	}
2266 
2267 stop:
2268 
2269 	// We have nothing to do. If we're in the GC mark phase, can
2270 	// safely scan and blacken objects, and have work to do, run
2271 	// idle-time marking rather than give up the P.
2272 	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2273 		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2274 		gp := _p_.gcBgMarkWorker.ptr()
2275 		casgstatus(gp, _Gwaiting, _Grunnable)
2276 		if trace.enabled {
2277 			traceGoUnpark(gp, 0)
2278 		}
2279 		return gp, false
2280 	}
2281 
2282 	// wasm only:
2283 	// If a callback returned and no other goroutine is awake,
2284 	// then pause execution until a callback is triggered.
2285 	if beforeIdle() {
2286 		// At least one goroutine got woken.
2287 		goto top
2288 	}
2289 
2290 	// Before we drop our P, make a snapshot of the allp slice,
2291 	// which can change underfoot once we no longer block
2292 	// safe-points. We don't need to snapshot the contents because
2293 	// everything up to cap(allp) is immutable.
2294 allpSnapshot := allp 2295 2296 // return P and block 2297 lock(&sched.lock) 2298 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2299 unlock(&sched.lock) 2300 goto top 2301 } 2302 if sched.runqsize != 0 { 2303 gp := globrunqget(_p_, 0) 2304 unlock(&sched.lock) 2305 return gp, false 2306 } 2307 if releasep() != _p_ { 2308 throw("findrunnable: wrong p") 2309 } 2310 pidleput(_p_) 2311 unlock(&sched.lock) 2312 2313 // Delicate dance: thread transitions from spinning to non-spinning state, 2314 // potentially concurrently with submission of new goroutines. We must 2315 // drop nmspinning first and then check all per-P queues again (with 2316 // #StoreLoad memory barrier in between). If we do it the other way around, 2317 // another thread can submit a goroutine after we've checked all run queues 2318 // but before we drop nmspinning; as the result nobody will unpark a thread 2319 // to run the goroutine. 2320 // If we discover new work below, we need to restore m.spinning as a signal 2321 // for resetspinning to unpark a new worker thread (because there can be more 2322 // than one starving goroutine). However, if after discovering new work 2323 // we also observe no idle Ps, it is OK to just park the current thread: 2324 // the system is fully loaded so no spinning threads are required. 2325 // Also see "Worker thread parking/unparking" comment at the top of the file. 2326 wasSpinning := _g_.m.spinning 2327 if _g_.m.spinning { 2328 _g_.m.spinning = false 2329 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2330 throw("findrunnable: negative nmspinning") 2331 } 2332 } 2333 2334 // check all runqueues once again 2335 for _, _p_ := range allpSnapshot { 2336 if !runqempty(_p_) { 2337 lock(&sched.lock) 2338 _p_ = pidleget() 2339 unlock(&sched.lock) 2340 if _p_ != nil { 2341 acquirep(_p_) 2342 if wasSpinning { 2343 _g_.m.spinning = true 2344 atomic.Xadd(&sched.nmspinning, 1) 2345 } 2346 goto top 2347 } 2348 break 2349 } 2350 } 2351 2352 // Check for idle-priority GC work again. 2353 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2354 lock(&sched.lock) 2355 _p_ = pidleget() 2356 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2357 pidleput(_p_) 2358 _p_ = nil 2359 } 2360 unlock(&sched.lock) 2361 if _p_ != nil { 2362 acquirep(_p_) 2363 if wasSpinning { 2364 _g_.m.spinning = true 2365 atomic.Xadd(&sched.nmspinning, 1) 2366 } 2367 // Go back to idle GC check. 2368 goto stop 2369 } 2370 } 2371 2372 // poll network 2373 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2374 if _g_.m.p != 0 { 2375 throw("findrunnable: netpoll with p") 2376 } 2377 if _g_.m.spinning { 2378 throw("findrunnable: netpoll with spinning") 2379 } 2380 list := netpoll(true) // block until new work is available 2381 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2382 if !list.empty() { 2383 lock(&sched.lock) 2384 _p_ = pidleget() 2385 unlock(&sched.lock) 2386 if _p_ != nil { 2387 acquirep(_p_) 2388 gp := list.pop() 2389 injectglist(&list) 2390 casgstatus(gp, _Gwaiting, _Grunnable) 2391 if trace.enabled { 2392 traceGoUnpark(gp, 0) 2393 } 2394 return gp, false 2395 } 2396 injectglist(&list) 2397 } 2398 } 2399 stopm() 2400 goto top 2401 } 2402 2403 // pollWork reports whether there is non-background work this P could 2404 // be doing. This is a fairly lightweight check to be used for 2405 // background work loops, like idle GC. It checks a subset of the 2406 // conditions checked by the actual scheduler. 
2407 func pollWork() bool { 2408 if sched.runqsize != 0 { 2409 return true 2410 } 2411 p := getg().m.p.ptr() 2412 if !runqempty(p) { 2413 return true 2414 } 2415 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2416 if list := netpoll(false); !list.empty() { 2417 injectglist(&list) 2418 return true 2419 } 2420 } 2421 return false 2422 } 2423 2424 func resetspinning() { 2425 _g_ := getg() 2426 if !_g_.m.spinning { 2427 throw("resetspinning: not a spinning m") 2428 } 2429 _g_.m.spinning = false 2430 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2431 if int32(nmspinning) < 0 { 2432 throw("findrunnable: negative nmspinning") 2433 } 2434 // M wakeup policy is deliberately somewhat conservative, so check if we 2435 // need to wakeup another P here. See "Worker thread parking/unparking" 2436 // comment at the top of the file for details. 2437 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2438 wakep() 2439 } 2440 } 2441 2442 // Injects the list of runnable G's into the scheduler and clears glist. 2443 // Can run concurrently with GC. 2444 func injectglist(glist *gList) { 2445 if glist.empty() { 2446 return 2447 } 2448 if trace.enabled { 2449 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { 2450 traceGoUnpark(gp, 0) 2451 } 2452 } 2453 lock(&sched.lock) 2454 var n int 2455 for n = 0; !glist.empty(); n++ { 2456 gp := glist.pop() 2457 casgstatus(gp, _Gwaiting, _Grunnable) 2458 globrunqput(gp) 2459 } 2460 unlock(&sched.lock) 2461 for ; n != 0 && sched.npidle != 0; n-- { 2462 startm(nil, false) 2463 } 2464 *glist = gList{} 2465 } 2466 2467 // One round of scheduler: find a runnable goroutine and execute it. 2468 // Never returns. 2469 func schedule() { 2470 _g_ := getg() 2471 2472 if _g_.m.locks != 0 { 2473 throw("schedule: holding locks") 2474 } 2475 2476 if _g_.m.lockedg != 0 { 2477 stoplockedm() 2478 execute(_g_.m.lockedg.ptr(), false) // Never returns. 2479 } 2480 2481 // We should not schedule away from a g that is executing a cgo call, 2482 // since the cgo call is using the m's g0 stack. 2483 if _g_.m.incgo { 2484 throw("schedule: in cgo") 2485 } 2486 2487 top: 2488 if sched.gcwaiting != 0 { 2489 gcstopm() 2490 goto top 2491 } 2492 if _g_.m.p.ptr().runSafePointFn != 0 { 2493 runSafePointFn() 2494 } 2495 2496 var gp *g 2497 var inheritTime bool 2498 if trace.enabled || trace.shutdown { 2499 gp = traceReader() 2500 if gp != nil { 2501 casgstatus(gp, _Gwaiting, _Grunnable) 2502 traceGoUnpark(gp, 0) 2503 } 2504 } 2505 if gp == nil && gcBlackenEnabled != 0 { 2506 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2507 } 2508 if gp == nil { 2509 // Check the global runnable queue once in a while to ensure fairness. 2510 // Otherwise two goroutines can completely occupy the local runqueue 2511 // by constantly respawning each other. 2512 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2513 lock(&sched.lock) 2514 gp = globrunqget(_g_.m.p.ptr(), 1) 2515 unlock(&sched.lock) 2516 } 2517 } 2518 if gp == nil { 2519 gp, inheritTime = runqget(_g_.m.p.ptr()) 2520 if gp != nil && _g_.m.spinning { 2521 throw("schedule: spinning with local work") 2522 } 2523 } 2524 if gp == nil { 2525 gp, inheritTime = findrunnable() // blocks until work is available 2526 } 2527 2528 // This thread is going to run a goroutine and is not spinning anymore, 2529 // so if it was marked as spinning we need to reset it now and potentially 2530 // start a new spinning M. 
2531 if _g_.m.spinning { 2532 resetspinning() 2533 } 2534 2535 if sched.disable.user && !schedEnabled(gp) { 2536 // Scheduling of this goroutine is disabled. Put it on 2537 // the list of pending runnable goroutines for when we 2538 // re-enable user scheduling and look again. 2539 lock(&sched.lock) 2540 if schedEnabled(gp) { 2541 // Something re-enabled scheduling while we 2542 // were acquiring the lock. 2543 unlock(&sched.lock) 2544 } else { 2545 sched.disable.runnable.pushBack(gp) 2546 sched.disable.n++ 2547 unlock(&sched.lock) 2548 goto top 2549 } 2550 } 2551 2552 if gp.lockedm != 0 { 2553 // Hands off own p to the locked m, 2554 // then blocks waiting for a new p. 2555 startlockedm(gp) 2556 goto top 2557 } 2558 2559 execute(gp, inheritTime) 2560 } 2561 2562 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2563 // Typically a caller sets gp's status away from Grunning and then 2564 // immediately calls dropg to finish the job. The caller is also responsible 2565 // for arranging that gp will be restarted using ready at an 2566 // appropriate time. After calling dropg and arranging for gp to be 2567 // readied later, the caller can do other work but eventually should 2568 // call schedule to restart the scheduling of goroutines on this m. 2569 func dropg() { 2570 _g_ := getg() 2571 2572 setMNoWB(&_g_.m.curg.m, nil) 2573 setGNoWB(&_g_.m.curg, nil) 2574 } 2575 2576 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2577 unlock((*mutex)(lock)) 2578 return true 2579 } 2580 2581 // park continuation on g0. 2582 func park_m(gp *g) { 2583 _g_ := getg() 2584 2585 if trace.enabled { 2586 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2587 } 2588 2589 casgstatus(gp, _Grunning, _Gwaiting) 2590 dropg() 2591 2592 if _g_.m.waitunlockf != nil { 2593 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2594 ok := fn(gp, _g_.m.waitlock) 2595 _g_.m.waitunlockf = nil 2596 _g_.m.waitlock = nil 2597 if !ok { 2598 if trace.enabled { 2599 traceGoUnpark(gp, 2) 2600 } 2601 casgstatus(gp, _Gwaiting, _Grunnable) 2602 execute(gp, true) // Schedule it back, never returns. 2603 } 2604 } 2605 schedule() 2606 } 2607 2608 func goschedImpl(gp *g) { 2609 status := readgstatus(gp) 2610 if status&^_Gscan != _Grunning { 2611 dumpgstatus(gp) 2612 throw("bad g status") 2613 } 2614 casgstatus(gp, _Grunning, _Grunnable) 2615 dropg() 2616 lock(&sched.lock) 2617 globrunqput(gp) 2618 unlock(&sched.lock) 2619 2620 schedule() 2621 } 2622 2623 // Gosched continuation on g0. 2624 func gosched_m(gp *g) { 2625 if trace.enabled { 2626 traceGoSched() 2627 } 2628 goschedImpl(gp) 2629 } 2630 2631 // goschedguarded is a forbidden-states-avoided version of gosched_m 2632 func goschedguarded_m(gp *g) { 2633 2634 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning { 2635 gogo(&gp.sched) // never return 2636 } 2637 2638 if trace.enabled { 2639 traceGoSched() 2640 } 2641 goschedImpl(gp) 2642 } 2643 2644 func gopreempt_m(gp *g) { 2645 if trace.enabled { 2646 traceGoPreempt() 2647 } 2648 goschedImpl(gp) 2649 } 2650 2651 // Finishes execution of the current goroutine. 2652 func goexit1() { 2653 if raceenabled { 2654 racegoend() 2655 } 2656 if trace.enabled { 2657 traceGoEnd() 2658 } 2659 mcall(goexit0) 2660 } 2661 2662 // goexit continuation on g0. 
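// For reference, user code reaches this path directly via runtime.Goexit,
// which runs deferred calls and then terminates the goroutine without
// returning (sketch):
//
//	go func() {
//		defer fmt.Println("still runs")
//		runtime.Goexit() // ends in goexit1, which mcalls goexit0
//	}()
//
// An ordinary return reaches the same place through the goexit trampoline
// that newproc1 installs as the topmost return address.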
2663 func goexit0(gp *g) {
2664 	_g_ := getg()
2665 
2666 	casgstatus(gp, _Grunning, _Gdead)
2667 	if isSystemGoroutine(gp, false) {
2668 		atomic.Xadd(&sched.ngsys, -1)
2669 	}
2670 	gp.m = nil
2671 	locked := gp.lockedm != 0
2672 	gp.lockedm = 0
2673 	_g_.m.lockedg = 0
2674 	gp.paniconfault = false
2675 	gp._defer = nil // should be nil already, but just in case.
2676 	gp._panic = nil // non-nil for Goexit during panic; points at stack-allocated data.
2677 	gp.writebuf = nil
2678 	gp.waitreason = 0
2679 	gp.param = nil
2680 	gp.labels = nil
2681 	gp.timer = nil
2682 
2683 	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2684 		// Flush assist credit to the global pool. This gives
2685 		// better information to pacing if the application is
2686 		// rapidly creating and exiting goroutines.
2687 		scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2688 		atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2689 		gp.gcAssistBytes = 0
2690 	}
2691 
2692 	// Note that gp's stack scan is now "valid" because it has no
2693 	// stack.
2694 	gp.gcscanvalid = true
2695 	dropg()
2696 
2697 	if GOARCH == "wasm" { // no threads yet on wasm
2698 		gfput(_g_.m.p.ptr(), gp)
2699 		schedule() // never returns
2700 	}
2701 
2702 	if _g_.m.lockedInt != 0 {
2703 		print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2704 		throw("internal lockOSThread error")
2705 	}
2706 	gfput(_g_.m.p.ptr(), gp)
2707 	if locked {
2708 		// The goroutine may have locked this thread because
2709 		// it put it in an unusual kernel state. Kill it
2710 		// rather than returning it to the thread pool.
2711 
2712 		// Return to mstart, which will release the P and exit
2713 		// the thread.
2714 		if GOOS != "plan9" { // See golang.org/issue/22227.
2715 			gogo(&_g_.m.g0.sched)
2716 		} else {
2717 			// Clear lockedExt on plan9 since we may end up re-using
2718 			// this thread.
2719 			_g_.m.lockedExt = 0
2720 		}
2721 	}
2722 	schedule()
2723 }
2724 
2725 // save updates getg().sched to refer to pc and sp so that a following
2726 // gogo will restore pc and sp.
2727 //
2728 // save must not have write barriers because invoking a write barrier
2729 // can clobber getg().sched.
2730 //
2731 //go:nosplit
2732 //go:nowritebarrierrec
2733 func save(pc, sp uintptr) {
2734 	_g_ := getg()
2735 
2736 	_g_.sched.pc = pc
2737 	_g_.sched.sp = sp
2738 	_g_.sched.lr = 0
2739 	_g_.sched.ret = 0
2740 	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
2741 	// We need to ensure ctxt is zero, but can't have a write
2742 	// barrier here. However, it should always already be zero.
2743 	// Assert that.
2744 	if _g_.sched.ctxt != nil {
2745 		badctxt()
2746 	}
2747 }
2748 
2749 // The goroutine g is about to enter a system call.
2750 // Record that it's not using the cpu anymore.
2751 // This is called only from the go syscall library and cgocall,
2752 // not from the low-level system calls used by the runtime.
2753 //
2754 // Entersyscall cannot split the stack: the gosave must
2755 // make g->sched refer to the caller's stack segment, because
2756 // entersyscall is going to return immediately after.
2757 //
2758 // Nothing entersyscall calls can split the stack either.
2759 // We cannot safely move the stack during an active call to syscall,
2760 // because we do not know which of the uintptr arguments are
2761 // really pointers (back into the stack).
2762 // In practice, this means that we make the fast path run through
2763 // entersyscall doing no-split things, and the slow path has to use systemstack
2764 // to run bigger things on the system stack.
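// In scheduler-state terms, the transition being described is (sketch):
//
//	G: _Grunning -> _Gsyscall   (casgstatus in reentersyscall)
//	P: _Prunning -> _Psyscall   (atomic.Store of pp.status)
//	M: keeps running the syscall; if the P stays in _Psyscall too long,
//	   sysmon retakes it and hands it to another M.
//
// exitsyscall later tries to undo both transitions on the fast path.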
2765 // 2766 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2767 // saved SP and PC are restored. This is needed when exitsyscall will be called 2768 // from a function further up in the call stack than the parent, as g->syscallsp 2769 // must always point to a valid stack frame. entersyscall below is the normal 2770 // entry point for syscalls, which obtains the SP and PC from the caller. 2771 // 2772 // Syscall tracing: 2773 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2774 // If the syscall does not block, that is it, we do not emit any other events. 2775 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2776 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2777 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2778 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2779 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2780 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2781 // and we wait for the increment before emitting traceGoSysExit. 2782 // Note that the increment is done even if tracing is not enabled, 2783 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2784 // 2785 //go:nosplit 2786 func reentersyscall(pc, sp uintptr) { 2787 _g_ := getg() 2788 2789 // Disable preemption because during this function g is in Gsyscall status, 2790 // but can have inconsistent g->sched, do not let GC observe it. 2791 _g_.m.locks++ 2792 2793 // Entersyscall must not call any function that might split/grow the stack. 2794 // (See details in comment above.) 2795 // Catch calls that might, by replacing the stack guard with something that 2796 // will trip any stack check and leaving a flag to tell newstack to die. 2797 _g_.stackguard0 = stackPreempt 2798 _g_.throwsplit = true 2799 2800 // Leave SP around for GC and traceback. 2801 save(pc, sp) 2802 _g_.syscallsp = sp 2803 _g_.syscallpc = pc 2804 casgstatus(_g_, _Grunning, _Gsyscall) 2805 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2806 systemstack(func() { 2807 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2808 throw("entersyscall") 2809 }) 2810 } 2811 2812 if trace.enabled { 2813 systemstack(traceGoSysCall) 2814 // systemstack itself clobbers g.sched.{pc,sp} and we might 2815 // need them later when the G is genuinely blocked in a 2816 // syscall 2817 save(pc, sp) 2818 } 2819 2820 if atomic.Load(&sched.sysmonwait) != 0 { 2821 systemstack(entersyscall_sysmon) 2822 save(pc, sp) 2823 } 2824 2825 if _g_.m.p.ptr().runSafePointFn != 0 { 2826 // runSafePointFn may stack split if run on this stack 2827 systemstack(runSafePointFn) 2828 save(pc, sp) 2829 } 2830 2831 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2832 _g_.sysblocktraced = true 2833 _g_.m.mcache = nil 2834 pp := _g_.m.p.ptr() 2835 pp.m = 0 2836 _g_.m.oldp.set(pp) 2837 _g_.m.p = 0 2838 atomic.Store(&pp.status, _Psyscall) 2839 if sched.gcwaiting != 0 { 2840 systemstack(entersyscall_gcwait) 2841 save(pc, sp) 2842 } 2843 2844 _g_.m.locks-- 2845 } 2846 2847 // Standard syscall entry used by the go syscall library and normal cgo calls. 
2848 //go:nosplit 2849 func entersyscall() { 2850 reentersyscall(getcallerpc(), getcallersp()) 2851 } 2852 2853 func entersyscall_sysmon() { 2854 lock(&sched.lock) 2855 if atomic.Load(&sched.sysmonwait) != 0 { 2856 atomic.Store(&sched.sysmonwait, 0) 2857 notewakeup(&sched.sysmonnote) 2858 } 2859 unlock(&sched.lock) 2860 } 2861 2862 func entersyscall_gcwait() { 2863 _g_ := getg() 2864 _p_ := _g_.m.oldp.ptr() 2865 2866 lock(&sched.lock) 2867 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2868 if trace.enabled { 2869 traceGoSysBlock(_p_) 2870 traceProcStop(_p_) 2871 } 2872 _p_.syscalltick++ 2873 if sched.stopwait--; sched.stopwait == 0 { 2874 notewakeup(&sched.stopnote) 2875 } 2876 } 2877 unlock(&sched.lock) 2878 } 2879 2880 // The same as entersyscall(), but with a hint that the syscall is blocking. 2881 //go:nosplit 2882 func entersyscallblock() { 2883 _g_ := getg() 2884 2885 _g_.m.locks++ // see comment in entersyscall 2886 _g_.throwsplit = true 2887 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2888 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2889 _g_.sysblocktraced = true 2890 _g_.m.p.ptr().syscalltick++ 2891 2892 // Leave SP around for GC and traceback. 2893 pc := getcallerpc() 2894 sp := getcallersp() 2895 save(pc, sp) 2896 _g_.syscallsp = _g_.sched.sp 2897 _g_.syscallpc = _g_.sched.pc 2898 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2899 sp1 := sp 2900 sp2 := _g_.sched.sp 2901 sp3 := _g_.syscallsp 2902 systemstack(func() { 2903 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2904 throw("entersyscallblock") 2905 }) 2906 } 2907 casgstatus(_g_, _Grunning, _Gsyscall) 2908 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2909 systemstack(func() { 2910 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2911 throw("entersyscallblock") 2912 }) 2913 } 2914 2915 systemstack(entersyscallblock_handoff) 2916 2917 // Resave for traceback during blocked call. 2918 save(getcallerpc(), getcallersp()) 2919 2920 _g_.m.locks-- 2921 } 2922 2923 func entersyscallblock_handoff() { 2924 if trace.enabled { 2925 traceGoSysCall() 2926 traceGoSysBlock(getg().m.p.ptr()) 2927 } 2928 handoffp(releasep()) 2929 } 2930 2931 // The goroutine g exited its system call. 2932 // Arrange for it to run on a cpu again. 2933 // This is called only from the go syscall library, not 2934 // from the low-level system calls used by the runtime. 2935 // 2936 // Write barriers are not allowed because our P may have been stolen. 2937 // 2938 //go:nosplit 2939 //go:nowritebarrierrec 2940 func exitsyscall() { 2941 _g_ := getg() 2942 2943 _g_.m.locks++ // see comment in entersyscall 2944 if getcallersp() > _g_.syscallsp { 2945 throw("exitsyscall: syscall frame is no longer valid") 2946 } 2947 2948 _g_.waitsince = 0 2949 oldp := _g_.m.oldp.ptr() 2950 _g_.m.oldp = 0 2951 if exitsyscallfast(oldp) { 2952 if _g_.m.mcache == nil { 2953 throw("lost mcache") 2954 } 2955 if trace.enabled { 2956 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2957 systemstack(traceGoStart) 2958 } 2959 } 2960 // There's a cpu for us, so we can run. 2961 _g_.m.p.ptr().syscalltick++ 2962 // We need to cas the status and scan before resuming... 
2963 	casgstatus(_g_, _Gsyscall, _Grunning)
2964 
2965 	// Garbage collector isn't running (since we are),
2966 	// so okay to clear syscallsp.
2967 	_g_.syscallsp = 0
2968 	_g_.m.locks--
2969 	if _g_.preempt {
2970 		// restore the preemption request in case we've cleared it in newstack
2971 		_g_.stackguard0 = stackPreempt
2972 	} else {
2973 		// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
2974 		_g_.stackguard0 = _g_.stack.lo + _StackGuard
2975 	}
2976 	_g_.throwsplit = false
2977 
2978 	if sched.disable.user && !schedEnabled(_g_) {
2979 		// Scheduling of this goroutine is disabled.
2980 		Gosched()
2981 	}
2982 
2983 	return
2984 }
2985 
2986 	_g_.sysexitticks = 0
2987 	if trace.enabled {
2988 		// Wait till traceGoSysBlock event is emitted.
2989 		// This ensures consistency of the trace (the goroutine is started after it is blocked).
2990 		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2991 			osyield()
2992 		}
2993 		// We can't trace syscall exit right now because we don't have a P.
2994 		// Tracing code can invoke write barriers that cannot run without a P.
2995 		// So instead we remember the syscall exit time and emit the event
2996 		// in execute when we have a P.
2997 		_g_.sysexitticks = cputicks()
2998 	}
2999 
3000 	_g_.m.locks--
3001 
3002 	// Call the scheduler.
3003 	mcall(exitsyscall0)
3004 
3005 	if _g_.m.mcache == nil {
3006 		throw("lost mcache")
3007 	}
3008 
3009 	// Scheduler returned, so we're allowed to run now.
3010 	// Delete the syscallsp information that we left for
3011 	// the garbage collector during the system call.
3012 	// Must wait until now because until gosched returns
3013 	// we don't know for sure that the garbage collector
3014 	// is not running.
3015 	_g_.syscallsp = 0
3016 	_g_.m.p.ptr().syscalltick++
3017 	_g_.throwsplit = false
3018 }
3019 
3020 //go:nosplit
3021 func exitsyscallfast(oldp *p) bool {
3022 	_g_ := getg()
3023 
3024 	// Freezetheworld sets stopwait but does not retake P's.
3025 	if sched.stopwait == freezeStopWait {
3026 		return false
3027 	}
3028 
3029 	// Try to re-acquire the last P.
3030 	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
3031 		// There's a cpu for us, so we can run.
3032 		wirep(oldp)
3033 		exitsyscallfast_reacquired()
3034 		return true
3035 	}
3036 
3037 	// Try to get any other idle P.
3038 	if sched.pidle != 0 {
3039 		var ok bool
3040 		systemstack(func() {
3041 			ok = exitsyscallfast_pidle()
3042 			if ok && trace.enabled {
3043 				if oldp != nil {
3044 					// Wait till traceGoSysBlock event is emitted.
3045 					// This ensures consistency of the trace (the goroutine is started after it is blocked).
3046 					for oldp.syscalltick == _g_.m.syscalltick {
3047 						osyield()
3048 					}
3049 				}
3050 				traceGoSysExit(0)
3051 			}
3052 		})
3053 		if ok {
3054 			return true
3055 		}
3056 	}
3057 	return false
3058 }
3059 
3060 // exitsyscallfast_reacquired is the exitsyscall path on which this G
3061 // has successfully reacquired the P it was running on before the
3062 // syscall.
3063 //
3064 //go:nosplit
3065 func exitsyscallfast_reacquired() {
3066 	_g_ := getg()
3067 	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3068 		if trace.enabled {
3069 			// The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed).
3070 			// traceGoSysBlock for this syscall was already emitted,
3071 			// but here we effectively retake the p from the new syscall running on the same p.
3072 			systemstack(func() {
3073 				// Denote blocking of the new syscall.
3074 				traceGoSysBlock(_g_.m.p.ptr())
3075 				// Denote completion of the current syscall.
3076 				traceGoSysExit(0)
3077 			})
3078 		}
3079 		_g_.m.p.ptr().syscalltick++
3080 	}
3081 }
3082 
3083 func exitsyscallfast_pidle() bool {
3084 	lock(&sched.lock)
3085 	_p_ := pidleget()
3086 	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3087 		atomic.Store(&sched.sysmonwait, 0)
3088 		notewakeup(&sched.sysmonnote)
3089 	}
3090 	unlock(&sched.lock)
3091 	if _p_ != nil {
3092 		acquirep(_p_)
3093 		return true
3094 	}
3095 	return false
3096 }
3097 
3098 // exitsyscall slow path on g0.
3099 // Failed to acquire P, enqueue gp as runnable.
3100 //
3101 //go:nowritebarrierrec
3102 func exitsyscall0(gp *g) {
3103 	_g_ := getg()
3104 
3105 	casgstatus(gp, _Gsyscall, _Grunnable)
3106 	dropg()
3107 	lock(&sched.lock)
3108 	var _p_ *p
3109 	if schedEnabled(_g_) {
3110 		_p_ = pidleget()
3111 	}
3112 	if _p_ == nil {
3113 		globrunqput(gp)
3114 	} else if atomic.Load(&sched.sysmonwait) != 0 {
3115 		atomic.Store(&sched.sysmonwait, 0)
3116 		notewakeup(&sched.sysmonnote)
3117 	}
3118 	unlock(&sched.lock)
3119 	if _p_ != nil {
3120 		acquirep(_p_)
3121 		execute(gp, false) // Never returns.
3122 	}
3123 	if _g_.m.lockedg != 0 {
3124 		// Wait until another thread schedules gp and so m again.
3125 		stoplockedm()
3126 		execute(gp, false) // Never returns.
3127 	}
3128 	stopm()
3129 	schedule() // Never returns.
3130 }
3131 
3132 func beforefork() {
3133 	gp := getg().m.curg
3134 
3135 	// Block signals during a fork, so that the child does not run
3136 	// a signal handler before exec if a signal is sent to the process
3137 	// group. See issue #18600.
3138 	gp.m.locks++
3139 	msigsave(gp.m)
3140 	sigblock()
3141 
3142 	// This function is called before fork in syscall package.
3143 	// Code between fork and exec must not allocate memory nor even try to grow the stack.
3144 	// Here we spoil g->_StackGuard to reliably detect any attempts to grow the stack.
3145 	// runtime_AfterFork will undo this in the parent process, but not in the child.
3146 	gp.stackguard0 = stackFork
3147 }
3148 
3149 // Called from syscall package before fork.
3150 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
3151 //go:nosplit
3152 func syscall_runtime_BeforeFork() {
3153 	systemstack(beforefork)
3154 }
3155 
3156 func afterfork() {
3157 	gp := getg().m.curg
3158 
3159 	// See the comments in beforefork.
3160 	gp.stackguard0 = gp.stack.lo + _StackGuard
3161 
3162 	msigrestore(gp.m.sigmask)
3163 
3164 	gp.m.locks--
3165 }
3166 
3167 // Called from syscall package after fork in parent.
3168 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
3169 //go:nosplit
3170 func syscall_runtime_AfterFork() {
3171 	systemstack(afterfork)
3172 }
3173 
3174 // inForkedChild is true while manipulating signals in the child process.
3175 // This is used to avoid calling libc functions in case we are using vfork.
3176 var inForkedChild bool
3177 
3178 // Called from syscall package after fork in child.
3179 // It resets non-sigignored signals to the default handler, and
3180 // restores the signal mask in preparation for the exec.
3181 //
3182 // Because this might be called during a vfork, and therefore may be
3183 // temporarily sharing address space with the parent process, this must
3184 // not change any global variables or call into C code that may do so.
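// These hooks also fire on the ordinary os/exec path. For example (sketch,
// Unix-like systems; error handling elided):
//
//	cmd := exec.Command("true")
//	_ = cmd.Run() // forkExec calls BeforeFork, then AfterFork in the
//	              // parent and AfterForkInChild in the child
//
// which is why the child-side code below must avoid anything beyond
// signal bookkeeping.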
3185 // 3186 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 3187 //go:nosplit 3188 //go:nowritebarrierrec 3189 func syscall_runtime_AfterForkInChild() { 3190 // It's OK to change the global variable inForkedChild here 3191 // because we are going to change it back. There is no race here, 3192 // because if we are sharing address space with the parent process, 3193 // then the parent process can not be running concurrently. 3194 inForkedChild = true 3195 3196 clearSignalHandlers() 3197 3198 // When we are the child we are the only thread running, 3199 // so we know that nothing else has changed gp.m.sigmask. 3200 msigrestore(getg().m.sigmask) 3201 3202 inForkedChild = false 3203 } 3204 3205 // Called from syscall package before Exec. 3206 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 3207 func syscall_runtime_BeforeExec() { 3208 // Prevent thread creation during exec. 3209 execLock.lock() 3210 } 3211 3212 // Called from syscall package after Exec. 3213 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 3214 func syscall_runtime_AfterExec() { 3215 execLock.unlock() 3216 } 3217 3218 // Allocate a new g, with a stack big enough for stacksize bytes. 3219 func malg(stacksize int32) *g { 3220 newg := new(g) 3221 if stacksize >= 0 { 3222 stacksize = round2(_StackSystem + stacksize) 3223 systemstack(func() { 3224 newg.stack = stackalloc(uint32(stacksize)) 3225 }) 3226 newg.stackguard0 = newg.stack.lo + _StackGuard 3227 newg.stackguard1 = ^uintptr(0) 3228 } 3229 return newg 3230 } 3231 3232 // Create a new g running fn with siz bytes of arguments. 3233 // Put it on the queue of g's waiting to run. 3234 // The compiler turns a go statement into a call to this. 3235 // Cannot split the stack because it assumes that the arguments 3236 // are available sequentially after &fn; they would not be 3237 // copied if a stack split occurred. 3238 //go:nosplit 3239 func newproc(siz int32, fn *funcval) { 3240 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 3241 gp := getg() 3242 pc := getcallerpc() 3243 systemstack(func() { 3244 newproc1(fn, (*uint8)(argp), siz, gp, pc) 3245 }) 3246 } 3247 3248 // Create a new g running fn with narg bytes of arguments starting 3249 // at argp. callerpc is the address of the go statement that created 3250 // this. The new g is put on the queue of g's waiting to run. 3251 func newproc1(fn *funcval, argp *uint8, narg int32, callergp *g, callerpc uintptr) { 3252 _g_ := getg() 3253 3254 if fn == nil { 3255 _g_.m.throwing = -1 // do not dump full stacks 3256 throw("go of nil func value") 3257 } 3258 _g_.m.locks++ // disable preemption because it can be holding p in a local var 3259 siz := narg 3260 siz = (siz + 7) &^ 7 3261 3262 // We could allocate a larger initial stack if necessary. 3263 // Not worth it: this is almost always an error. 3264 // 4*sizeof(uintreg): extra space added below 3265 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 3266 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 3267 throw("newproc: function arguments too large for new goroutine") 3268 } 3269 3270 _p_ := _g_.m.p.ptr() 3271 newg := gfget(_p_) 3272 if newg == nil { 3273 newg = malg(_StackMin) 3274 casgstatus(newg, _Gidle, _Gdead) 3275 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 
3276 	}
3277 	if newg.stack.hi == 0 {
3278 		throw("newproc1: newg missing stack")
3279 	}
3280 
3281 	if readgstatus(newg) != _Gdead {
3282 		throw("newproc1: new g is not Gdead")
3283 	}
3284 
3285 	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
3286 	totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign
3287 	sp := newg.stack.hi - totalSize
3288 	spArg := sp
3289 	if usesLR {
3290 		// caller's LR
3291 		*(*uintptr)(unsafe.Pointer(sp)) = 0
3292 		prepGoExitFrame(sp)
3293 		spArg += sys.MinFrameSize
3294 	}
3295 	if narg > 0 {
3296 		memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
3297 		// This is a stack-to-stack copy. If write barriers
3298 		// are enabled and the source stack is grey (the
3299 		// destination is always black), then perform a
3300 		// barrier copy. We do this *after* the memmove
3301 		// because the destination stack may have garbage on
3302 		// it.
3303 		if writeBarrier.needed && !_g_.m.curg.gcscandone {
3304 			f := findfunc(fn.fn)
3305 			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
3306 			if stkmap.nbit > 0 {
3307 				// We're in the prologue, so it's always stack map index 0.
3308 				bv := stackmapdata(stkmap, 0)
3309 				bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata)
3310 			}
3311 		}
3312 	}
3313 
3314 	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
3315 	newg.sched.sp = sp
3316 	newg.stktopsp = sp
3317 	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
3318 	newg.sched.g = guintptr(unsafe.Pointer(newg))
3319 	gostartcallfn(&newg.sched, fn)
3320 	newg.gopc = callerpc
3321 	newg.ancestors = saveAncestors(callergp)
3322 	newg.startpc = fn.fn
3323 	if _g_.m.curg != nil {
3324 		newg.labels = _g_.m.curg.labels
3325 	}
3326 	if isSystemGoroutine(newg, false) {
3327 		atomic.Xadd(&sched.ngsys, +1)
3328 	}
3329 	newg.gcscanvalid = false
3330 	casgstatus(newg, _Gdead, _Grunnable)
3331 
3332 	if _p_.goidcache == _p_.goidcacheend {
3333 		// Sched.goidgen is the last allocated id,
3334 		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3335 		// At startup sched.goidgen=0, so main goroutine receives goid=1.
3336 		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3337 		_p_.goidcache -= _GoidCacheBatch - 1
3338 		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3339 	}
3340 	newg.goid = int64(_p_.goidcache)
3341 	_p_.goidcache++
3342 	if raceenabled {
3343 		newg.racectx = racegostart(callerpc)
3344 	}
3345 	if trace.enabled {
3346 		traceGoCreate(newg, newg.startpc)
3347 	}
3348 	runqput(_p_, newg, true)
3349 
3350 	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3351 		wakep()
3352 	}
3353 	_g_.m.locks--
3354 	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
3355 		_g_.stackguard0 = stackPreempt
3356 	}
3357 }
3358 
3359 // saveAncestors copies previous ancestors of the given caller g and
3360 // includes info for the current caller in a new set of tracebacks for
3361 // a g being created.
3362 func saveAncestors(callergp *g) *[]ancestorInfo {
3363 	// Copy all prior info, except for the root goroutine (goid 0).
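// The feature is controlled by the tracebackancestors GODEBUG setting
// (sketch of usage):
//
//	GODEBUG=tracebackancestors=10 ./prog
//
// With a value of N, crash tracebacks show up to N levels of "created by"
// ancestor goroutine stacks; 0, the default, disables the feature.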
3364 if debug.tracebackancestors <= 0 || callergp.goid == 0 { 3365 return nil 3366 } 3367 var callerAncestors []ancestorInfo 3368 if callergp.ancestors != nil { 3369 callerAncestors = *callergp.ancestors 3370 } 3371 n := int32(len(callerAncestors)) + 1 3372 if n > debug.tracebackancestors { 3373 n = debug.tracebackancestors 3374 } 3375 ancestors := make([]ancestorInfo, n) 3376 copy(ancestors[1:], callerAncestors) 3377 3378 var pcs [_TracebackMaxFrames]uintptr 3379 npcs := gcallers(callergp, 0, pcs[:]) 3380 ipcs := make([]uintptr, npcs) 3381 copy(ipcs, pcs[:]) 3382 ancestors[0] = ancestorInfo{ 3383 pcs: ipcs, 3384 goid: callergp.goid, 3385 gopc: callergp.gopc, 3386 } 3387 3388 ancestorsp := new([]ancestorInfo) 3389 *ancestorsp = ancestors 3390 return ancestorsp 3391 } 3392 3393 // Put on gfree list. 3394 // If local list is too long, transfer a batch to the global list. 3395 func gfput(_p_ *p, gp *g) { 3396 if readgstatus(gp) != _Gdead { 3397 throw("gfput: bad status (not Gdead)") 3398 } 3399 3400 stksize := gp.stack.hi - gp.stack.lo 3401 3402 if stksize != _FixedStack { 3403 // non-standard stack size - free it. 3404 stackfree(gp.stack) 3405 gp.stack.lo = 0 3406 gp.stack.hi = 0 3407 gp.stackguard0 = 0 3408 } 3409 3410 _p_.gFree.push(gp) 3411 _p_.gFree.n++ 3412 if _p_.gFree.n >= 64 { 3413 lock(&sched.gFree.lock) 3414 for _p_.gFree.n >= 32 { 3415 _p_.gFree.n-- 3416 gp = _p_.gFree.pop() 3417 if gp.stack.lo == 0 { 3418 sched.gFree.noStack.push(gp) 3419 } else { 3420 sched.gFree.stack.push(gp) 3421 } 3422 sched.gFree.n++ 3423 } 3424 unlock(&sched.gFree.lock) 3425 } 3426 } 3427 3428 // Get from gfree list. 3429 // If local list is empty, grab a batch from global list. 3430 func gfget(_p_ *p) *g { 3431 retry: 3432 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { 3433 lock(&sched.gFree.lock) 3434 // Move a batch of free Gs to the P. 3435 for _p_.gFree.n < 32 { 3436 // Prefer Gs with stacks. 3437 gp := sched.gFree.stack.pop() 3438 if gp == nil { 3439 gp = sched.gFree.noStack.pop() 3440 if gp == nil { 3441 break 3442 } 3443 } 3444 sched.gFree.n-- 3445 _p_.gFree.push(gp) 3446 _p_.gFree.n++ 3447 } 3448 unlock(&sched.gFree.lock) 3449 goto retry 3450 } 3451 gp := _p_.gFree.pop() 3452 if gp == nil { 3453 return nil 3454 } 3455 _p_.gFree.n-- 3456 if gp.stack.lo == 0 { 3457 // Stack was deallocated in gfput. Allocate a new one. 3458 systemstack(func() { 3459 gp.stack = stackalloc(_FixedStack) 3460 }) 3461 gp.stackguard0 = gp.stack.lo + _StackGuard 3462 } else { 3463 if raceenabled { 3464 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3465 } 3466 if msanenabled { 3467 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3468 } 3469 } 3470 return gp 3471 } 3472 3473 // Purge all cached G's from gfree list to the global list. 3474 func gfpurge(_p_ *p) { 3475 lock(&sched.gFree.lock) 3476 for !_p_.gFree.empty() { 3477 gp := _p_.gFree.pop() 3478 _p_.gFree.n-- 3479 if gp.stack.lo == 0 { 3480 sched.gFree.noStack.push(gp) 3481 } else { 3482 sched.gFree.stack.push(gp) 3483 } 3484 sched.gFree.n++ 3485 } 3486 unlock(&sched.gFree.lock) 3487 } 3488 3489 // Breakpoint executes a breakpoint trap. 3490 func Breakpoint() { 3491 breakpoint() 3492 } 3493 3494 // dolockOSThread is called by LockOSThread and lockOSThread below 3495 // after they modify m.locked. Do not allow preemption during this call, 3496 // or else the m might be different in this function than in the caller. 
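// Typical user-level pattern served by these functions (sketch): pin the
// goroutine before touching per-thread state, e.g. a C library that uses
// thread-local storage:
//
//	runtime.LockOSThread()
//	defer runtime.UnlockOSThread()
//	// ... thread-sensitive calls ...
//
// While locked, no other goroutine runs on the thread, as documented on
// LockOSThread below.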
3497 //go:nosplit
3498 func dolockOSThread() {
3499 	if GOARCH == "wasm" {
3500 		return // no threads on wasm yet
3501 	}
3502 	_g_ := getg()
3503 	_g_.m.lockedg.set(_g_)
3504 	_g_.lockedm.set(_g_.m)
3505 }
3506 
3507 //go:nosplit
3508 
3509 // LockOSThread wires the calling goroutine to its current operating system thread.
3510 // The calling goroutine will always execute in that thread,
3511 // and no other goroutine will execute in it,
3512 // until the calling goroutine has made as many calls to
3513 // UnlockOSThread as to LockOSThread.
3514 // If the calling goroutine exits without unlocking the thread,
3515 // the thread will be terminated.
3516 //
3517 // All init functions are run on the startup thread. Calling LockOSThread
3518 // from an init function will cause the main function to be invoked on
3519 // that thread.
3520 //
3521 // A goroutine should call LockOSThread before calling OS services or
3522 // non-Go library functions that depend on per-thread state.
3523 func LockOSThread() {
3524 	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3525 		// If we need to start a new thread from the locked
3526 		// thread, we need the template thread. Start it now
3527 		// while we're in a known-good state.
3528 		startTemplateThread()
3529 	}
3530 	_g_ := getg()
3531 	_g_.m.lockedExt++
3532 	if _g_.m.lockedExt == 0 {
3533 		_g_.m.lockedExt--
3534 		panic("LockOSThread nesting overflow")
3535 	}
3536 	dolockOSThread()
3537 }
3538 
3539 //go:nosplit
3540 func lockOSThread() {
3541 	getg().m.lockedInt++
3542 	dolockOSThread()
3543 }
3544 
3545 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3546 // after they update m->locked. Do not allow preemption during this call,
3547 // or else the m might be different in this function than in the caller.
3548 //go:nosplit
3549 func dounlockOSThread() {
3550 	if GOARCH == "wasm" {
3551 		return // no threads on wasm yet
3552 	}
3553 	_g_ := getg()
3554 	if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3555 		return
3556 	}
3557 	_g_.m.lockedg = 0
3558 	_g_.lockedm = 0
3559 }
3560 
3561 //go:nosplit
3562 
3563 // UnlockOSThread undoes an earlier call to LockOSThread.
3564 // If this drops the number of active LockOSThread calls on the
3565 // calling goroutine to zero, it unwires the calling goroutine from
3566 // its fixed operating system thread.
3567 // If there are no active LockOSThread calls, this is a no-op.
3568 //
3569 // Before calling UnlockOSThread, the caller must ensure that the OS
3570 // thread is suitable for running other goroutines. If the caller made
3571 // any permanent changes to the state of the thread that would affect
3572 // other goroutines, it should not call this function and thus leave
3573 // the goroutine locked to the OS thread until the goroutine (and
3574 // hence the thread) exits.
3575 func UnlockOSThread() { 3576 _g_ := getg() 3577 if _g_.m.lockedExt == 0 { 3578 return 3579 } 3580 _g_.m.lockedExt-- 3581 dounlockOSThread() 3582 } 3583 3584 //go:nosplit 3585 func unlockOSThread() { 3586 _g_ := getg() 3587 if _g_.m.lockedInt == 0 { 3588 systemstack(badunlockosthread) 3589 } 3590 _g_.m.lockedInt-- 3591 dounlockOSThread() 3592 } 3593 3594 func badunlockosthread() { 3595 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 3596 } 3597 3598 func gcount() int32 { 3599 n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) 3600 for _, _p_ := range allp { 3601 n -= _p_.gFree.n 3602 } 3603 3604 // All these variables can be changed concurrently, so the result can be inconsistent. 3605 // But at least the current goroutine is running. 3606 if n < 1 { 3607 n = 1 3608 } 3609 return n 3610 } 3611 3612 func mcount() int32 { 3613 return int32(sched.mnext - sched.nmfreed) 3614 } 3615 3616 var prof struct { 3617 signalLock uint32 3618 hz int32 3619 } 3620 3621 func _System() { _System() } 3622 func _ExternalCode() { _ExternalCode() } 3623 func _LostExternalCode() { _LostExternalCode() } 3624 func _GC() { _GC() } 3625 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 3626 func _VDSO() { _VDSO() } 3627 3628 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le} 3629 var lostAtomic64Count uint64 3630 3631 // Called if we receive a SIGPROF signal. 3632 // Called by the signal handler, may run during STW. 3633 //go:nowritebarrierrec 3634 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3635 if prof.hz == 0 { 3636 return 3637 } 3638 3639 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 3640 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 3641 // the critical section, it creates a deadlock (when writing the sample). 3642 // As a workaround, create a counter of SIGPROFs while in critical section 3643 // to store the count, and pass it to sigprof.add() later when SIGPROF is 3644 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 3645 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { 3646 if f := findfunc(pc); f.valid() { 3647 if hasPrefix(funcname(f), "runtime/internal/atomic") { 3648 lostAtomic64Count++ 3649 return 3650 } 3651 } 3652 } 3653 3654 // Profiling runs concurrently with GC, so it must not allocate. 3655 // Set a trap in case the code does allocate. 3656 // Note that on windows, one thread takes profiles of all the 3657 // other threads, so mp is usually not getg().m. 3658 // In fact mp may not even be stopped. 3659 // See golang.org/issue/17165. 3660 getg().m.mallocing++ 3661 3662 // Define that a "user g" is a user-created goroutine, and a "system g" 3663 // is one that is m->g0 or m->gsignal. 3664 // 3665 // We might be interrupted for profiling halfway through a 3666 // goroutine switch. The switch involves updating three (or four) values: 3667 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3668 // because once it gets updated the new g is running. 3669 // 3670 // When switching from a user g to a system g, LR is not considered live, 3671 // so the update only affects g, SP, and PC. Since PC must be last, there 3672 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3673 // (2) both g and SP are updated, and (3) SP alone is updated. 3674 // If SP or g alone is updated, we can detect the partial transition by checking 3675 // whether the SP is within g's stack bounds. 
(We could also require that SP 3676 // be changed only after g, but the stack bounds check is needed by other 3677 // cases, so there is no need to impose an additional requirement.) 3678 // 3679 // There is one exceptional transition to a system g, not in ordinary execution. 3680 // When a signal arrives, the operating system starts the signal handler running 3681 // with an updated PC and SP. The g is updated last, at the beginning of the 3682 // handler. There are two reasons this is okay. First, until g is updated the 3683 // g and SP do not match, so the stack bounds check detects the partial transition. 3684 // Second, signal handlers currently run with signals disabled, so a profiling 3685 // signal cannot arrive during the handler. 3686 // 3687 // When switching from a system g to a user g, there are three possibilities. 3688 // 3689 // First, it may be that the g switch has no PC update, because the SP 3690 // either corresponds to a user g throughout (as in asmcgocall) 3691 // or because it has been arranged to look like a user g frame 3692 // (as in cgocallback_gofunc). In this case, since the entire 3693 // transition is a g+SP update, a partial transition updating just one of 3694 // those will be detected by the stack bounds check. 3695 // 3696 // Second, when returning from a signal handler, the PC and SP updates 3697 // are performed by the operating system in an atomic update, so the g 3698 // update must be done before them. The stack bounds check detects 3699 // the partial transition here, and (again) signal handlers run with signals 3700 // disabled, so a profiling signal cannot arrive then anyway. 3701 // 3702 // Third, the common case: it may be that the switch updates g, SP, and PC 3703 // separately. If the PC is within any of the functions that does this, 3704 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3705 // 3706 // There is another apparently viable approach, recorded here in case 3707 // the "PC within setsSP function" check turns out not to be usable. 3708 // It would be possible to delay the update of either g or SP until immediately 3709 // before the PC update instruction. Then, because of the stack bounds check, 3710 // the only problematic interrupt point is just before that PC update instruction, 3711 // and the sigprof handler can detect that instruction and simulate stepping past 3712 // it in order to reach a consistent state. On ARM, the update of g must be made 3713 // in two places (in R10 and also in a TLS slot), so the delayed update would 3714 // need to be the SP update. The sigprof handler must read the instruction at 3715 // the current PC and if it was the known instruction (for example, JMP BX or 3716 // MOV R2, PC), use that other register in place of the PC value. 3717 // The biggest drawback to this solution is that it requires that we can tell 3718 // whether it's safe to read from the memory pointed at by PC. 3719 // In a correct program, we can test PC == nil and otherwise read, 3720 // but if a profiling signal happens at the instant that a program executes 3721 // a bad jump (before the program manages to handle the resulting fault) 3722 // the profiling handler could fault trying to read nonexistent memory. 3723 // 3724 // To recap, there are no constraints on the assembly being used for the 3725 // transition. We simply require that g and SP match and that the PC is not 3726 // in gogo. 
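// Restated as code, the consistency requirement used below is a stack bounds
// predicate (illustrative only; the additional setsSP and vdsoSP conditions in
// the real check are handled separately):
//
//	consistent := gp != nil && gp.stack.lo <= sp && sp <= gp.stack.hi
//
// A sample that fails this predicate was taken in the middle of a g/SP/PC
// switch, so no traceback is attempted for it.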
3727 traceback := true 3728 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) { 3729 traceback = false 3730 } 3731 var stk [maxCPUProfStack]uintptr 3732 n := 0 3733 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3734 cgoOff := 0 3735 // Check cgoCallersUse to make sure that we are not 3736 // interrupting other code that is fiddling with 3737 // cgoCallers. We are running in a signal handler 3738 // with all signals blocked, so we don't have to worry 3739 // about any other code interrupting us. 3740 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3741 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3742 cgoOff++ 3743 } 3744 copy(stk[:], mp.cgoCallers[:cgoOff]) 3745 mp.cgoCallers[0] = 0 3746 } 3747 3748 // Collect Go stack that leads to the cgo call. 3749 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3750 if n > 0 { 3751 n += cgoOff 3752 } 3753 } else if traceback { 3754 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3755 } 3756 3757 if n <= 0 { 3758 // Normal traceback is impossible or has failed. 3759 // See if it falls into several common cases. 3760 n = 0 3761 if (GOOS == "windows" || GOOS == "solaris" || GOOS == "darwin") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3762 // Libcall, i.e. runtime syscall on windows. 3763 // Collect Go stack that leads to the call. 3764 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3765 } 3766 if n == 0 && mp != nil && mp.vdsoSP != 0 { 3767 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3768 } 3769 if n == 0 { 3770 // If all of the above has failed, account it against abstract "System" or "GC". 3771 n = 2 3772 if inVDSOPage(pc) { 3773 pc = funcPC(_VDSO) + sys.PCQuantum 3774 } else if pc > firstmoduledata.etext { 3775 // "ExternalCode" is better than "etext". 3776 pc = funcPC(_ExternalCode) + sys.PCQuantum 3777 } 3778 stk[0] = pc 3779 if mp.preemptoff != "" { 3780 stk[1] = funcPC(_GC) + sys.PCQuantum 3781 } else { 3782 stk[1] = funcPC(_System) + sys.PCQuantum 3783 } 3784 } 3785 } 3786 3787 if prof.hz != 0 { 3788 if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm") && lostAtomic64Count > 0 { 3789 cpuprof.addLostAtomic64(lostAtomic64Count) 3790 lostAtomic64Count = 0 3791 } 3792 cpuprof.add(gp, stk[:n]) 3793 } 3794 getg().m.mallocing-- 3795 } 3796 3797 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3798 // it tries to collect a traceback into sigprofCallers. 3799 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3800 var sigprofCallers cgoCallers 3801 var sigprofCallersUse uint32 3802 3803 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3804 // and the signal handler collected a stack trace in sigprofCallers. 3805 // When this is called, sigprofCallersUse will be non-zero. 3806 // g is nil, and what we can do is very limited. 
3807 //go:nosplit 3808 //go:nowritebarrierrec 3809 func sigprofNonGo() { 3810 if prof.hz != 0 { 3811 n := 0 3812 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3813 n++ 3814 } 3815 cpuprof.addNonGo(sigprofCallers[:n]) 3816 } 3817 3818 atomic.Store(&sigprofCallersUse, 0) 3819 } 3820 3821 // sigprofNonGoPC is called when a profiling signal arrived on a 3822 // non-Go thread and we have a single PC value, not a stack trace. 3823 // g is nil, and what we can do is very limited. 3824 //go:nosplit 3825 //go:nowritebarrierrec 3826 func sigprofNonGoPC(pc uintptr) { 3827 if prof.hz != 0 { 3828 stk := []uintptr{ 3829 pc, 3830 funcPC(_ExternalCode) + sys.PCQuantum, 3831 } 3832 cpuprof.addNonGo(stk) 3833 } 3834 } 3835 3836 // Reports whether a function will set the SP 3837 // to an absolute value. Important that 3838 // we don't traceback when these are at the bottom 3839 // of the stack since we can't be sure that we will 3840 // find the caller. 3841 // 3842 // If the function is not on the bottom of the stack 3843 // we assume that it will have set it up so that traceback will be consistent, 3844 // either by being a traceback terminating function 3845 // or putting one on the stack at the right offset. 3846 func setsSP(pc uintptr) bool { 3847 f := findfunc(pc) 3848 if !f.valid() { 3849 // couldn't find the function for this PC, 3850 // so assume the worst and stop traceback 3851 return true 3852 } 3853 switch f.funcID { 3854 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack: 3855 return true 3856 } 3857 return false 3858 } 3859 3860 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3861 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3862 func setcpuprofilerate(hz int32) { 3863 // Force sane arguments. 3864 if hz < 0 { 3865 hz = 0 3866 } 3867 3868 // Disable preemption, otherwise we can be rescheduled to another thread 3869 // that has profiling enabled. 3870 _g_ := getg() 3871 _g_.m.locks++ 3872 3873 // Stop profiler on this thread so that it is safe to lock prof. 3874 // if a profiling signal came in while we had prof locked, 3875 // it would deadlock. 3876 setThreadCPUProfiler(0) 3877 3878 for !atomic.Cas(&prof.signalLock, 0, 1) { 3879 osyield() 3880 } 3881 if prof.hz != hz { 3882 setProcessCPUProfiler(hz) 3883 prof.hz = hz 3884 } 3885 atomic.Store(&prof.signalLock, 0) 3886 3887 lock(&sched.lock) 3888 sched.profilehz = hz 3889 unlock(&sched.lock) 3890 3891 if hz != 0 { 3892 setThreadCPUProfiler(hz) 3893 } 3894 3895 _g_.m.locks-- 3896 } 3897 3898 // Change number of processors. The world is stopped, sched is locked. 3899 // gcworkbufs are not being modified by either the GC or 3900 // the write barrier code. 3901 // Returns list of Ps with local work, they need to be scheduled by the caller. 3902 func procresize(nprocs int32) *p { 3903 old := gomaxprocs 3904 if old < 0 || nprocs <= 0 { 3905 throw("procresize: invalid arg") 3906 } 3907 if trace.enabled { 3908 traceGomaxprocs(nprocs) 3909 } 3910 3911 // update statistics 3912 now := nanotime() 3913 if sched.procresizetime != 0 { 3914 sched.totaltime += int64(old) * (now - sched.procresizetime) 3915 } 3916 sched.procresizetime = now 3917 3918 // Grow allp if necessary. 3919 if nprocs > int32(len(allp)) { 3920 // Synchronize with retake, which could be running 3921 // concurrently since it doesn't run on a P. 
3922 lock(&allpLock) 3923 if nprocs <= int32(cap(allp)) { 3924 allp = allp[:nprocs] 3925 } else { 3926 nallp := make([]*p, nprocs) 3927 // Copy everything up to allp's cap so we 3928 // never lose old allocated Ps. 3929 copy(nallp, allp[:cap(allp)]) 3930 allp = nallp 3931 } 3932 unlock(&allpLock) 3933 } 3934 3935 // initialize new P's 3936 for i := int32(0); i < nprocs; i++ { 3937 pp := allp[i] 3938 if pp == nil { 3939 pp = new(p) 3940 pp.id = i 3941 pp.status = _Pgcstop 3942 pp.sudogcache = pp.sudogbuf[:0] 3943 for i := range pp.deferpool { 3944 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3945 } 3946 pp.wbBuf.reset() 3947 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3948 } 3949 if pp.mcache == nil { 3950 if old == 0 && i == 0 { 3951 if getg().m.mcache == nil { 3952 throw("missing mcache?") 3953 } 3954 pp.mcache = getg().m.mcache // bootstrap 3955 } else { 3956 pp.mcache = allocmcache() 3957 } 3958 } 3959 if raceenabled && pp.racectx == 0 { 3960 if old == 0 && i == 0 { 3961 pp.racectx = raceprocctx0 3962 raceprocctx0 = 0 // bootstrap 3963 } else { 3964 pp.racectx = raceproccreate() 3965 } 3966 } 3967 } 3968 3969 // free unused P's 3970 for i := nprocs; i < old; i++ { 3971 p := allp[i] 3972 if trace.enabled && p == getg().m.p.ptr() { 3973 // moving to p[0], pretend that we were descheduled 3974 // and then scheduled again to keep the trace sane. 3975 traceGoSched() 3976 traceProcStop(p) 3977 } 3978 // move all runnable goroutines to the global queue 3979 for p.runqhead != p.runqtail { 3980 // pop from tail of local queue 3981 p.runqtail-- 3982 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3983 // push onto head of global queue 3984 globrunqputhead(gp) 3985 } 3986 if p.runnext != 0 { 3987 globrunqputhead(p.runnext.ptr()) 3988 p.runnext = 0 3989 } 3990 // if there's a background worker, make it runnable and put 3991 // it on the global queue so it can clean itself up 3992 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 3993 casgstatus(gp, _Gwaiting, _Grunnable) 3994 if trace.enabled { 3995 traceGoUnpark(gp, 0) 3996 } 3997 globrunqput(gp) 3998 // This assignment doesn't race because the 3999 // world is stopped. 4000 p.gcBgMarkWorker.set(nil) 4001 } 4002 // Flush p's write barrier buffer. 4003 if gcphase != _GCoff { 4004 wbBufFlush1(p) 4005 p.gcw.dispose() 4006 } 4007 for i := range p.sudogbuf { 4008 p.sudogbuf[i] = nil 4009 } 4010 p.sudogcache = p.sudogbuf[:0] 4011 for i := range p.deferpool { 4012 for j := range p.deferpoolbuf[i] { 4013 p.deferpoolbuf[i][j] = nil 4014 } 4015 p.deferpool[i] = p.deferpoolbuf[i][:0] 4016 } 4017 freemcache(p.mcache) 4018 p.mcache = nil 4019 gfpurge(p) 4020 traceProcFree(p) 4021 if raceenabled { 4022 raceprocdestroy(p.racectx) 4023 p.racectx = 0 4024 } 4025 p.gcAssistTime = 0 4026 p.status = _Pdead 4027 // can't free P itself because it can be referenced by an M in syscall 4028 } 4029 4030 // Trim allp. 
4031 if int32(len(allp)) != nprocs { 4032 lock(&allpLock) 4033 allp = allp[:nprocs] 4034 unlock(&allpLock) 4035 } 4036 4037 _g_ := getg() 4038 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4039 // continue to use the current P 4040 _g_.m.p.ptr().status = _Prunning 4041 _g_.m.p.ptr().mcache.prepareForSweep() 4042 } else { 4043 // release the current P and acquire allp[0] 4044 if _g_.m.p != 0 { 4045 _g_.m.p.ptr().m = 0 4046 } 4047 _g_.m.p = 0 4048 _g_.m.mcache = nil 4049 p := allp[0] 4050 p.m = 0 4051 p.status = _Pidle 4052 acquirep(p) 4053 if trace.enabled { 4054 traceGoStart() 4055 } 4056 } 4057 var runnablePs *p 4058 for i := nprocs - 1; i >= 0; i-- { 4059 p := allp[i] 4060 if _g_.m.p.ptr() == p { 4061 continue 4062 } 4063 p.status = _Pidle 4064 if runqempty(p) { 4065 pidleput(p) 4066 } else { 4067 p.m.set(mget()) 4068 p.link.set(runnablePs) 4069 runnablePs = p 4070 } 4071 } 4072 stealOrder.reset(uint32(nprocs)) 4073 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 4074 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 4075 return runnablePs 4076 } 4077 4078 // Associate p and the current m. 4079 // 4080 // This function is allowed to have write barriers even if the caller 4081 // isn't because it immediately acquires _p_. 4082 // 4083 //go:yeswritebarrierrec 4084 func acquirep(_p_ *p) { 4085 // Do the part that isn't allowed to have write barriers. 4086 wirep(_p_) 4087 4088 // Have p; write barriers now allowed. 4089 4090 // Perform deferred mcache flush before this P can allocate 4091 // from a potentially stale mcache. 4092 _p_.mcache.prepareForSweep() 4093 4094 if trace.enabled { 4095 traceProcStart() 4096 } 4097 } 4098 4099 // wirep is the first step of acquirep, which actually associates the 4100 // current M to _p_. This is broken out so we can disallow write 4101 // barriers for this part, since we don't yet have a P. 4102 // 4103 //go:nowritebarrierrec 4104 //go:nosplit 4105 func wirep(_p_ *p) { 4106 _g_ := getg() 4107 4108 if _g_.m.p != 0 || _g_.m.mcache != nil { 4109 throw("wirep: already in go") 4110 } 4111 if _p_.m != 0 || _p_.status != _Pidle { 4112 id := int64(0) 4113 if _p_.m != 0 { 4114 id = _p_.m.ptr().id 4115 } 4116 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 4117 throw("wirep: invalid p state") 4118 } 4119 _g_.m.mcache = _p_.mcache 4120 _g_.m.p.set(_p_) 4121 _p_.m.set(_g_.m) 4122 _p_.status = _Prunning 4123 } 4124 4125 // Disassociate p and the current m. 4126 func releasep() *p { 4127 _g_ := getg() 4128 4129 if _g_.m.p == 0 || _g_.m.mcache == nil { 4130 throw("releasep: invalid arg") 4131 } 4132 _p_ := _g_.m.p.ptr() 4133 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 4134 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 4135 throw("releasep: invalid p state") 4136 } 4137 if trace.enabled { 4138 traceProcStop(_g_.m.p.ptr()) 4139 } 4140 _g_.m.p = 0 4141 _g_.m.mcache = nil 4142 _p_.m = 0 4143 _p_.status = _Pidle 4144 return _p_ 4145 } 4146 4147 func incidlelocked(v int32) { 4148 lock(&sched.lock) 4149 sched.nmidlelocked += v 4150 if v > 0 { 4151 checkdead() 4152 } 4153 unlock(&sched.lock) 4154 } 4155 4156 // Check for deadlock situation. 4157 // The check is based on number of running M's, if 0 -> deadlock. 4158 // sched.lock must be held. 
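// A minimal program that reaches the state checkdead reports (a separate
// illustrative example, not part of this file): every goroutine is blocked,
// no M can make progress, and the runtime throws
// "all goroutines are asleep - deadlock!".
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // no sender will ever exist, so checkdead fires once all Ms are idle
//	}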
4159 func checkdead() { 4160 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 4161 // there are no running goroutines. The calling program is 4162 // assumed to be running. 4163 if islibrary || isarchive { 4164 return 4165 } 4166 4167 // If we are dying because of a signal caught on an already idle thread, 4168 // freezetheworld will cause all running threads to block. 4169 // And runtime will essentially enter into deadlock state, 4170 // except that there is a thread that will call exit soon. 4171 if panicking > 0 { 4172 return 4173 } 4174 4175 // If we are not running under cgo, but we have an extra M then account 4176 // for it. (It is possible to have an extra M on Windows without cgo to 4177 // accommodate callbacks created by syscall.NewCallback. See issue #6751 4178 // for details.) 4179 var run0 int32 4180 if !iscgo && cgoHasExtraM { 4181 run0 = 1 4182 } 4183 4184 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 4185 if run > run0 { 4186 return 4187 } 4188 if run < 0 { 4189 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 4190 throw("checkdead: inconsistent counts") 4191 } 4192 4193 grunning := 0 4194 lock(&allglock) 4195 for i := 0; i < len(allgs); i++ { 4196 gp := allgs[i] 4197 if isSystemGoroutine(gp, false) { 4198 continue 4199 } 4200 s := readgstatus(gp) 4201 switch s &^ _Gscan { 4202 case _Gwaiting: 4203 grunning++ 4204 case _Grunnable, 4205 _Grunning, 4206 _Gsyscall: 4207 unlock(&allglock) 4208 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 4209 throw("checkdead: runnable g") 4210 } 4211 } 4212 unlock(&allglock) 4213 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 4214 throw("no goroutines (main called runtime.Goexit) - deadlock!") 4215 } 4216 4217 // Maybe jump time forward for playground. 4218 gp := timejump() 4219 if gp != nil { 4220 casgstatus(gp, _Gwaiting, _Grunnable) 4221 globrunqput(gp) 4222 _p_ := pidleget() 4223 if _p_ == nil { 4224 throw("checkdead: no p for timer") 4225 } 4226 mp := mget() 4227 if mp == nil { 4228 // There should always be a free M since 4229 // nothing is running. 4230 throw("checkdead: no m for timer") 4231 } 4232 mp.nextp.set(_p_) 4233 notewakeup(&mp.park) 4234 return 4235 } 4236 4237 getg().m.throwing = -1 // do not dump full stacks 4238 throw("all goroutines are asleep - deadlock!") 4239 } 4240 4241 // forcegcperiod is the maximum time in nanoseconds between garbage 4242 // collections. If we go this long without a garbage collection, one 4243 // is forced to run. 4244 // 4245 // This is a variable for testing purposes. It normally doesn't change. 4246 var forcegcperiod int64 = 2 * 60 * 1e9 4247 4248 // Always runs without a P, so write barriers are not allowed. 4249 // 4250 //go:nowritebarrierrec 4251 func sysmon() { 4252 lock(&sched.lock) 4253 sched.nmsys++ 4254 checkdead() 4255 unlock(&sched.lock) 4256 4257 // If a heap span goes unused for 5 minutes after a garbage collection, 4258 // we hand it back to the operating system. 4259 scavengelimit := int64(5 * 60 * 1e9) 4260 4261 if debug.scavenge > 0 { 4262 // Scavenge-a-lot for testing. 4263 forcegcperiod = 10 * 1e6 4264 scavengelimit = 20 * 1e6 4265 } 4266 4267 lastscavenge := nanotime() 4268 nscavenge := 0 4269 4270 lasttrace := int64(0) 4271 idle := 0 // how many cycles in succession we had not wokeup somebody 4272 delay := uint32(0) 4273 for { 4274 if idle == 0 { // start with 20us sleep... 
4275 delay = 20 4276 } else if idle > 50 { // start doubling the sleep after 1ms... 4277 delay *= 2 4278 } 4279 if delay > 10*1000 { // up to 10ms 4280 delay = 10 * 1000 4281 } 4282 usleep(delay) 4283 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 4284 lock(&sched.lock) 4285 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 4286 atomic.Store(&sched.sysmonwait, 1) 4287 unlock(&sched.lock) 4288 // Make wake-up period small enough 4289 // for the sampling to be correct. 4290 maxsleep := forcegcperiod / 2 4291 if scavengelimit < forcegcperiod { 4292 maxsleep = scavengelimit / 2 4293 } 4294 shouldRelax := true 4295 if osRelaxMinNS > 0 { 4296 next := timeSleepUntil() 4297 now := nanotime() 4298 if next-now < osRelaxMinNS { 4299 shouldRelax = false 4300 } 4301 } 4302 if shouldRelax { 4303 osRelax(true) 4304 } 4305 notetsleep(&sched.sysmonnote, maxsleep) 4306 if shouldRelax { 4307 osRelax(false) 4308 } 4309 lock(&sched.lock) 4310 atomic.Store(&sched.sysmonwait, 0) 4311 noteclear(&sched.sysmonnote) 4312 idle = 0 4313 delay = 20 4314 } 4315 unlock(&sched.lock) 4316 } 4317 // trigger libc interceptors if needed 4318 if *cgo_yield != nil { 4319 asmcgocall(*cgo_yield, nil) 4320 } 4321 // poll network if not polled for more than 10ms 4322 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 4323 now := nanotime() 4324 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 4325 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 4326 list := netpoll(false) // non-blocking - returns list of goroutines 4327 if !list.empty() { 4328 // Need to decrement number of idle locked M's 4329 // (pretending that one more is running) before injectglist. 4330 // Otherwise it can lead to the following situation: 4331 // injectglist grabs all P's but before it starts M's to run the P's, 4332 // another M returns from syscall, finishes running its G, 4333 // observes that there is no work to do and no other running M's 4334 // and reports deadlock. 4335 incidlelocked(-1) 4336 injectglist(&list) 4337 incidlelocked(1) 4338 } 4339 } 4340 // retake P's blocked in syscalls 4341 // and preempt long running G's 4342 if retake(now) != 0 { 4343 idle = 0 4344 } else { 4345 idle++ 4346 } 4347 // check if we need to force a GC 4348 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 4349 lock(&forcegc.lock) 4350 forcegc.idle = 0 4351 var list gList 4352 list.push(forcegc.g) 4353 injectglist(&list) 4354 unlock(&forcegc.lock) 4355 } 4356 // scavenge heap once in a while 4357 if lastscavenge+scavengelimit/2 < now { 4358 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 4359 lastscavenge = now 4360 nscavenge++ 4361 } 4362 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 4363 lasttrace = now 4364 schedtrace(debug.scheddetail > 0) 4365 } 4366 } 4367 } 4368 4369 type sysmontick struct { 4370 schedtick uint32 4371 schedwhen int64 4372 syscalltick uint32 4373 syscallwhen int64 4374 } 4375 4376 // forcePreemptNS is the time slice given to a G before it is 4377 // preempted. 4378 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 4379 4380 func retake(now int64) uint32 { 4381 n := 0 4382 // Prevent allp slice changes. This lock will be completely 4383 // uncontended unless we're already stopping the world. 4384 lock(&allpLock) 4385 // We can't use a range loop over allp because we may 4386 // temporarily drop the allpLock. 
Hence, we need to re-fetch 4387 // allp each time around the loop. 4388 for i := 0; i < len(allp); i++ { 4389 _p_ := allp[i] 4390 if _p_ == nil { 4391 // This can happen if procresize has grown 4392 // allp but not yet created new Ps. 4393 continue 4394 } 4395 pd := &_p_.sysmontick 4396 s := _p_.status 4397 if s == _Psyscall { 4398 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 4399 t := int64(_p_.syscalltick) 4400 if int64(pd.syscalltick) != t { 4401 pd.syscalltick = uint32(t) 4402 pd.syscallwhen = now 4403 continue 4404 } 4405 // On the one hand we don't want to retake Ps if there is no other work to do, 4406 // but on the other hand we want to retake them eventually 4407 // because they can prevent the sysmon thread from deep sleep. 4408 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 4409 continue 4410 } 4411 // Drop allpLock so we can take sched.lock. 4412 unlock(&allpLock) 4413 // Need to decrement number of idle locked M's 4414 // (pretending that one more is running) before the CAS. 4415 // Otherwise the M from which we retake can exit the syscall, 4416 // increment nmidle and report deadlock. 4417 incidlelocked(-1) 4418 if atomic.Cas(&_p_.status, s, _Pidle) { 4419 if trace.enabled { 4420 traceGoSysBlock(_p_) 4421 traceProcStop(_p_) 4422 } 4423 n++ 4424 _p_.syscalltick++ 4425 handoffp(_p_) 4426 } 4427 incidlelocked(1) 4428 lock(&allpLock) 4429 } else if s == _Prunning { 4430 // Preempt G if it's running for too long. 4431 t := int64(_p_.schedtick) 4432 if int64(pd.schedtick) != t { 4433 pd.schedtick = uint32(t) 4434 pd.schedwhen = now 4435 continue 4436 } 4437 if pd.schedwhen+forcePreemptNS > now { 4438 continue 4439 } 4440 preemptone(_p_) 4441 } 4442 } 4443 unlock(&allpLock) 4444 return uint32(n) 4445 } 4446 4447 // Tell all goroutines that they have been preempted and they should stop. 4448 // This function is purely best-effort. It can fail to inform a goroutine if a 4449 // processor just started running it. 4450 // No locks need to be held. 4451 // Returns true if preemption request was issued to at least one goroutine. 4452 func preemptall() bool { 4453 res := false 4454 for _, _p_ := range allp { 4455 if _p_.status != _Prunning { 4456 continue 4457 } 4458 if preemptone(_p_) { 4459 res = true 4460 } 4461 } 4462 return res 4463 } 4464 4465 // Tell the goroutine running on processor P to stop. 4466 // This function is purely best-effort. It can incorrectly fail to inform the 4467 // goroutine. It can send inform the wrong goroutine. Even if it informs the 4468 // correct goroutine, that goroutine might ignore the request if it is 4469 // simultaneously executing newstack. 4470 // No lock needs to be held. 4471 // Returns true if preemption request was issued. 4472 // The actual preemption will happen at some point in the future 4473 // and will be indicated by the gp->status no longer being 4474 // Grunning 4475 func preemptone(_p_ *p) bool { 4476 mp := _p_.m.ptr() 4477 if mp == nil || mp == getg().m { 4478 return false 4479 } 4480 gp := mp.curg 4481 if gp == nil || gp == mp.g0 { 4482 return false 4483 } 4484 4485 gp.preempt = true 4486 4487 // Every call in a go routine checks for stack overflow by 4488 // comparing the current stack pointer to gp->stackguard0. 4489 // Setting gp->stackguard0 to StackPreempt folds 4490 // preemption into the normal stack overflow check. 
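// (Illustrative summary: stackPreempt is a sentinel larger than any valid
// stack pointer, so the next stack-growth check in the preempted goroutine's
// function prologue fails and control enters newstack, which sees gp.preempt
// set and reschedules the goroutine instead of growing its stack.)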
4491 gp.stackguard0 = stackPreempt 4492 return true 4493 } 4494 4495 var starttime int64 4496 4497 func schedtrace(detailed bool) { 4498 now := nanotime() 4499 if starttime == 0 { 4500 starttime = now 4501 } 4502 4503 lock(&sched.lock) 4504 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 4505 if detailed { 4506 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 4507 } 4508 // We must be careful while reading data from P's, M's and G's. 4509 // Even if we hold schedlock, most data can be changed concurrently. 4510 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 4511 for i, _p_ := range allp { 4512 mp := _p_.m.ptr() 4513 h := atomic.Load(&_p_.runqhead) 4514 t := atomic.Load(&_p_.runqtail) 4515 if detailed { 4516 id := int64(-1) 4517 if mp != nil { 4518 id = mp.id 4519 } 4520 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, "\n") 4521 } else { 4522 // In non-detailed mode format lengths of per-P run queues as: 4523 // [len1 len2 len3 len4] 4524 print(" ") 4525 if i == 0 { 4526 print("[") 4527 } 4528 print(t - h) 4529 if i == len(allp)-1 { 4530 print("]\n") 4531 } 4532 } 4533 } 4534 4535 if !detailed { 4536 unlock(&sched.lock) 4537 return 4538 } 4539 4540 for mp := allm; mp != nil; mp = mp.alllink { 4541 _p_ := mp.p.ptr() 4542 gp := mp.curg 4543 lockedg := mp.lockedg.ptr() 4544 id1 := int32(-1) 4545 if _p_ != nil { 4546 id1 = _p_.id 4547 } 4548 id2 := int64(-1) 4549 if gp != nil { 4550 id2 = gp.goid 4551 } 4552 id3 := int64(-1) 4553 if lockedg != nil { 4554 id3 = lockedg.goid 4555 } 4556 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 4557 } 4558 4559 lock(&allglock) 4560 for gi := 0; gi < len(allgs); gi++ { 4561 gp := allgs[gi] 4562 mp := gp.m 4563 lockedm := gp.lockedm.ptr() 4564 id1 := int64(-1) 4565 if mp != nil { 4566 id1 = mp.id 4567 } 4568 id2 := int64(-1) 4569 if lockedm != nil { 4570 id2 = lockedm.id 4571 } 4572 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") 4573 } 4574 unlock(&allglock) 4575 unlock(&sched.lock) 4576 } 4577 4578 // schedEnableUser enables or disables the scheduling of user 4579 // goroutines. 4580 // 4581 // This does not stop already running user goroutines, so the caller 4582 // should first stop the world when disabling user goroutines. 4583 func schedEnableUser(enable bool) { 4584 lock(&sched.lock) 4585 if sched.disable.user == !enable { 4586 unlock(&sched.lock) 4587 return 4588 } 4589 sched.disable.user = !enable 4590 if enable { 4591 n := sched.disable.n 4592 sched.disable.n = 0 4593 globrunqputbatch(&sched.disable.runnable, n) 4594 unlock(&sched.lock) 4595 for ; n != 0 && sched.npidle != 0; n-- { 4596 startm(nil, false) 4597 } 4598 } else { 4599 unlock(&sched.lock) 4600 } 4601 } 4602 4603 // schedEnabled reports whether gp should be scheduled. It returns 4604 // false is scheduling of gp is disabled. 
4605 func schedEnabled(gp *g) bool { 4606 if sched.disable.user { 4607 return isSystemGoroutine(gp, true) 4608 } 4609 return true 4610 } 4611 4612 // Put mp on midle list. 4613 // Sched must be locked. 4614 // May run during STW, so write barriers are not allowed. 4615 //go:nowritebarrierrec 4616 func mput(mp *m) { 4617 mp.schedlink = sched.midle 4618 sched.midle.set(mp) 4619 sched.nmidle++ 4620 checkdead() 4621 } 4622 4623 // Try to get an m from midle list. 4624 // Sched must be locked. 4625 // May run during STW, so write barriers are not allowed. 4626 //go:nowritebarrierrec 4627 func mget() *m { 4628 mp := sched.midle.ptr() 4629 if mp != nil { 4630 sched.midle = mp.schedlink 4631 sched.nmidle-- 4632 } 4633 return mp 4634 } 4635 4636 // Put gp on the global runnable queue. 4637 // Sched must be locked. 4638 // May run during STW, so write barriers are not allowed. 4639 //go:nowritebarrierrec 4640 func globrunqput(gp *g) { 4641 sched.runq.pushBack(gp) 4642 sched.runqsize++ 4643 } 4644 4645 // Put gp at the head of the global runnable queue. 4646 // Sched must be locked. 4647 // May run during STW, so write barriers are not allowed. 4648 //go:nowritebarrierrec 4649 func globrunqputhead(gp *g) { 4650 sched.runq.push(gp) 4651 sched.runqsize++ 4652 } 4653 4654 // Put a batch of runnable goroutines on the global runnable queue. 4655 // This clears *batch. 4656 // Sched must be locked. 4657 func globrunqputbatch(batch *gQueue, n int32) { 4658 sched.runq.pushBackAll(*batch) 4659 sched.runqsize += n 4660 *batch = gQueue{} 4661 } 4662 4663 // Try get a batch of G's from the global runnable queue. 4664 // Sched must be locked. 4665 func globrunqget(_p_ *p, max int32) *g { 4666 if sched.runqsize == 0 { 4667 return nil 4668 } 4669 4670 n := sched.runqsize/gomaxprocs + 1 4671 if n > sched.runqsize { 4672 n = sched.runqsize 4673 } 4674 if max > 0 && n > max { 4675 n = max 4676 } 4677 if n > int32(len(_p_.runq))/2 { 4678 n = int32(len(_p_.runq)) / 2 4679 } 4680 4681 sched.runqsize -= n 4682 4683 gp := sched.runq.pop() 4684 n-- 4685 for ; n > 0; n-- { 4686 gp1 := sched.runq.pop() 4687 runqput(_p_, gp1, false) 4688 } 4689 return gp 4690 } 4691 4692 // Put p to on _Pidle list. 4693 // Sched must be locked. 4694 // May run during STW, so write barriers are not allowed. 4695 //go:nowritebarrierrec 4696 func pidleput(_p_ *p) { 4697 if !runqempty(_p_) { 4698 throw("pidleput: P has non-empty run queue") 4699 } 4700 _p_.link = sched.pidle 4701 sched.pidle.set(_p_) 4702 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 4703 } 4704 4705 // Try get a p from _Pidle list. 4706 // Sched must be locked. 4707 // May run during STW, so write barriers are not allowed. 4708 //go:nowritebarrierrec 4709 func pidleget() *p { 4710 _p_ := sched.pidle.ptr() 4711 if _p_ != nil { 4712 sched.pidle = _p_.link 4713 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 4714 } 4715 return _p_ 4716 } 4717 4718 // runqempty reports whether _p_ has no Gs on its local run queue. 4719 // It never returns true spuriously. 4720 func runqempty(_p_ *p) bool { 4721 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, 4722 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. 4723 // Simply observing that runqhead == runqtail and then observing that runqnext == nil 4724 // does not mean the queue is empty. 
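// One interleaving of that race, spelled out (observer O checks emptiness
// while the owner P keeps scheduling; illustrative only):
//
//	O: loads runqhead and runqtail, sees them equal (G1 sits only in runnext)
//	P: runqput kicks G1 out of runnext into the runq
//	P: runqget takes the new runnext, leaving runnext == 0 while G1 is in the runq
//	O: loads runnext, sees 0, and would wrongly report the queue empty
//
// Re-reading runqtail after loading runnext, as the loop below does, rejects
// such torn snapshots and retries.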
4725 for { 4726 head := atomic.Load(&_p_.runqhead) 4727 tail := atomic.Load(&_p_.runqtail) 4728 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) 4729 if tail == atomic.Load(&_p_.runqtail) { 4730 return head == tail && runnext == 0 4731 } 4732 } 4733 } 4734 4735 // To shake out latent assumptions about scheduling order, 4736 // we introduce some randomness into scheduling decisions 4737 // when running with the race detector. 4738 // The need for this was made obvious by changing the 4739 // (deterministic) scheduling order in Go 1.5 and breaking 4740 // many poorly-written tests. 4741 // With the randomness here, as long as the tests pass 4742 // consistently with -race, they shouldn't have latent scheduling 4743 // assumptions. 4744 const randomizeScheduler = raceenabled 4745 4746 // runqput tries to put g on the local runnable queue. 4747 // If next is false, runqput adds g to the tail of the runnable queue. 4748 // If next is true, runqput puts g in the _p_.runnext slot. 4749 // If the run queue is full, runnext puts g on the global queue. 4750 // Executed only by the owner P. 4751 func runqput(_p_ *p, gp *g, next bool) { 4752 if randomizeScheduler && next && fastrand()%2 == 0 { 4753 next = false 4754 } 4755 4756 if next { 4757 retryNext: 4758 oldnext := _p_.runnext 4759 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 4760 goto retryNext 4761 } 4762 if oldnext == 0 { 4763 return 4764 } 4765 // Kick the old runnext out to the regular run queue. 4766 gp = oldnext.ptr() 4767 } 4768 4769 retry: 4770 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 4771 t := _p_.runqtail 4772 if t-h < uint32(len(_p_.runq)) { 4773 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4774 atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4775 return 4776 } 4777 if runqputslow(_p_, gp, h, t) { 4778 return 4779 } 4780 // the queue is not full, now the put above must succeed 4781 goto retry 4782 } 4783 4784 // Put g and a batch of work from local runnable queue on global queue. 4785 // Executed only by the owner P. 4786 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4787 var batch [len(_p_.runq)/2 + 1]*g 4788 4789 // First, grab a batch from local queue. 4790 n := t - h 4791 n = n / 2 4792 if n != uint32(len(_p_.runq)/2) { 4793 throw("runqputslow: queue is not full") 4794 } 4795 for i := uint32(0); i < n; i++ { 4796 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 4797 } 4798 if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4799 return false 4800 } 4801 batch[n] = gp 4802 4803 if randomizeScheduler { 4804 for i := uint32(1); i <= n; i++ { 4805 j := fastrandn(i + 1) 4806 batch[i], batch[j] = batch[j], batch[i] 4807 } 4808 } 4809 4810 // Link the goroutines. 4811 for i := uint32(0); i < n; i++ { 4812 batch[i].schedlink.set(batch[i+1]) 4813 } 4814 var q gQueue 4815 q.head.set(batch[0]) 4816 q.tail.set(batch[n]) 4817 4818 // Now put the batch on global queue. 4819 lock(&sched.lock) 4820 globrunqputbatch(&q, int32(n+1)) 4821 unlock(&sched.lock) 4822 return true 4823 } 4824 4825 // Get g from local runnable queue. 4826 // If inheritTime is true, gp should inherit the remaining time in the 4827 // current time slice. Otherwise, it should start a new time slice. 4828 // Executed only by the owner P. 4829 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4830 // If there's a runnext, it's the next G to run. 
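// The loop below must CAS runnext to 0 rather than plainly storing 0: a
// stealing P running runqgrab may be racing to take runnext at the same time,
// and only one of the two sides may win the G.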
4831 for { 4832 next := _p_.runnext 4833 if next == 0 { 4834 break 4835 } 4836 if _p_.runnext.cas(next, 0) { 4837 return next.ptr(), true 4838 } 4839 } 4840 4841 for { 4842 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 4843 t := _p_.runqtail 4844 if t == h { 4845 return nil, false 4846 } 4847 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4848 if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4849 return gp, false 4850 } 4851 } 4852 } 4853 4854 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4855 // Batch is a ring buffer starting at batchHead. 4856 // Returns number of grabbed goroutines. 4857 // Can be executed by any P. 4858 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4859 for { 4860 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 4861 t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer 4862 n := t - h 4863 n = n - n/2 4864 if n == 0 { 4865 if stealRunNextG { 4866 // Try to steal from _p_.runnext. 4867 if next := _p_.runnext; next != 0 { 4868 if _p_.status == _Prunning { 4869 // Sleep to ensure that _p_ isn't about to run the g 4870 // we are about to steal. 4871 // The important use case here is when the g running 4872 // on _p_ ready()s another g and then almost 4873 // immediately blocks. Instead of stealing runnext 4874 // in this window, back off to give _p_ a chance to 4875 // schedule runnext. This will avoid thrashing gs 4876 // between different Ps. 4877 // A sync chan send/recv takes ~50ns as of time of 4878 // writing, so 3us gives ~50x overshoot. 4879 if GOOS != "windows" { 4880 usleep(3) 4881 } else { 4882 // On windows system timer granularity is 4883 // 1-15ms, which is way too much for this 4884 // optimization. So just yield. 4885 osyield() 4886 } 4887 } 4888 if !_p_.runnext.cas(next, 0) { 4889 continue 4890 } 4891 batch[batchHead%uint32(len(batch))] = next 4892 return 1 4893 } 4894 } 4895 return 0 4896 } 4897 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4898 continue 4899 } 4900 for i := uint32(0); i < n; i++ { 4901 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4902 batch[(batchHead+i)%uint32(len(batch))] = g 4903 } 4904 if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4905 return n 4906 } 4907 } 4908 } 4909 4910 // Steal half of elements from local runnable queue of p2 4911 // and put onto local runnable queue of p. 4912 // Returns one of the stolen elements (or nil if failed). 4913 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4914 t := _p_.runqtail 4915 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4916 if n == 0 { 4917 return nil 4918 } 4919 n-- 4920 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4921 if n == 0 { 4922 return gp 4923 } 4924 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 4925 if t-h+n >= uint32(len(_p_.runq)) { 4926 throw("runqsteal: runq overflow") 4927 } 4928 atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4929 return gp 4930 } 4931 4932 // A gQueue is a dequeue of Gs linked through g.schedlink. A G can only 4933 // be on one gQueue or gList at a time. 4934 type gQueue struct { 4935 head guintptr 4936 tail guintptr 4937 } 4938 4939 // empty reports whether q is empty. 4940 func (q *gQueue) empty() bool { 4941 return q.head == 0 4942 } 4943 4944 // push adds gp to the head of q. 
4945 func (q *gQueue) push(gp *g) { 4946 gp.schedlink = q.head 4947 q.head.set(gp) 4948 if q.tail == 0 { 4949 q.tail.set(gp) 4950 } 4951 } 4952 4953 // pushBack adds gp to the tail of q. 4954 func (q *gQueue) pushBack(gp *g) { 4955 gp.schedlink = 0 4956 if q.tail != 0 { 4957 q.tail.ptr().schedlink.set(gp) 4958 } else { 4959 q.head.set(gp) 4960 } 4961 q.tail.set(gp) 4962 } 4963 4964 // pushBackAll adds all Gs in l2 to the tail of q. After this q2 must 4965 // not be used. 4966 func (q *gQueue) pushBackAll(q2 gQueue) { 4967 if q2.tail == 0 { 4968 return 4969 } 4970 q2.tail.ptr().schedlink = 0 4971 if q.tail != 0 { 4972 q.tail.ptr().schedlink = q2.head 4973 } else { 4974 q.head = q2.head 4975 } 4976 q.tail = q2.tail 4977 } 4978 4979 // pop removes and returns the head of queue q. It returns nil if 4980 // q is empty. 4981 func (q *gQueue) pop() *g { 4982 gp := q.head.ptr() 4983 if gp != nil { 4984 q.head = gp.schedlink 4985 if q.head == 0 { 4986 q.tail = 0 4987 } 4988 } 4989 return gp 4990 } 4991 4992 // popList takes all Gs in q and returns them as a gList. 4993 func (q *gQueue) popList() gList { 4994 stack := gList{q.head} 4995 *q = gQueue{} 4996 return stack 4997 } 4998 4999 // A gList is a list of Gs linked through g.schedlink. A G can only be 5000 // on one gQueue or gList at a time. 5001 type gList struct { 5002 head guintptr 5003 } 5004 5005 // empty reports whether l is empty. 5006 func (l *gList) empty() bool { 5007 return l.head == 0 5008 } 5009 5010 // push adds gp to the head of l. 5011 func (l *gList) push(gp *g) { 5012 gp.schedlink = l.head 5013 l.head.set(gp) 5014 } 5015 5016 // pushAll prepends all Gs in q to l. 5017 func (l *gList) pushAll(q gQueue) { 5018 if !q.empty() { 5019 q.tail.ptr().schedlink = l.head 5020 l.head = q.head 5021 } 5022 } 5023 5024 // pop removes and returns the head of l. If l is empty, it returns nil. 
5025 func (l *gList) pop() *g { 5026 gp := l.head.ptr() 5027 if gp != nil { 5028 l.head = gp.schedlink 5029 } 5030 return gp 5031 } 5032 5033 //go:linkname setMaxThreads runtime/debug.setMaxThreads 5034 func setMaxThreads(in int) (out int) { 5035 lock(&sched.lock) 5036 out = int(sched.maxmcount) 5037 if in > 0x7fffffff { // MaxInt32 5038 sched.maxmcount = 0x7fffffff 5039 } else { 5040 sched.maxmcount = int32(in) 5041 } 5042 checkmcount() 5043 unlock(&sched.lock) 5044 return 5045 } 5046 5047 func haveexperiment(name string) bool { 5048 if name == "framepointer" { 5049 return framepointer_enabled // set by linker 5050 } 5051 x := sys.Goexperiment 5052 for x != "" { 5053 xname := "" 5054 i := index(x, ",") 5055 if i < 0 { 5056 xname, x = x, "" 5057 } else { 5058 xname, x = x[:i], x[i+1:] 5059 } 5060 if xname == name { 5061 return true 5062 } 5063 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 5064 return false 5065 } 5066 } 5067 return false 5068 } 5069 5070 //go:nosplit 5071 func procPin() int { 5072 _g_ := getg() 5073 mp := _g_.m 5074 5075 mp.locks++ 5076 return int(mp.p.ptr().id) 5077 } 5078 5079 //go:nosplit 5080 func procUnpin() { 5081 _g_ := getg() 5082 _g_.m.locks-- 5083 } 5084 5085 //go:linkname sync_runtime_procPin sync.runtime_procPin 5086 //go:nosplit 5087 func sync_runtime_procPin() int { 5088 return procPin() 5089 } 5090 5091 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 5092 //go:nosplit 5093 func sync_runtime_procUnpin() { 5094 procUnpin() 5095 } 5096 5097 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 5098 //go:nosplit 5099 func sync_atomic_runtime_procPin() int { 5100 return procPin() 5101 } 5102 5103 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 5104 //go:nosplit 5105 func sync_atomic_runtime_procUnpin() { 5106 procUnpin() 5107 } 5108 5109 // Active spinning for sync.Mutex. 5110 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 5111 //go:nosplit 5112 func sync_runtime_canSpin(i int) bool { 5113 // sync.Mutex is cooperative, so we are conservative with spinning. 5114 // Spin only few times and only if running on a multicore machine and 5115 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 5116 // As opposed to runtime mutex we don't do passive spinning here, 5117 // because there can be work on global runq or on other Ps. 5118 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 5119 return false 5120 } 5121 if p := getg().m.p.ptr(); !runqempty(p) { 5122 return false 5123 } 5124 return true 5125 } 5126 5127 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 5128 //go:nosplit 5129 func sync_runtime_doSpin() { 5130 procyield(active_spin_cnt) 5131 } 5132 5133 var stealOrder randomOrder 5134 5135 // randomOrder/randomEnum are helper types for randomized work stealing. 5136 // They allow to enumerate all Ps in different pseudo-random orders without repetitions. 5137 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS 5138 // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration. 
5139 type randomOrder struct { 5140 count uint32 5141 coprimes []uint32 5142 } 5143 5144 type randomEnum struct { 5145 i uint32 5146 count uint32 5147 pos uint32 5148 inc uint32 5149 } 5150 5151 func (ord *randomOrder) reset(count uint32) { 5152 ord.count = count 5153 ord.coprimes = ord.coprimes[:0] 5154 for i := uint32(1); i <= count; i++ { 5155 if gcd(i, count) == 1 { 5156 ord.coprimes = append(ord.coprimes, i) 5157 } 5158 } 5159 } 5160 5161 func (ord *randomOrder) start(i uint32) randomEnum { 5162 return randomEnum{ 5163 count: ord.count, 5164 pos: i % ord.count, 5165 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 5166 } 5167 } 5168 5169 func (enum *randomEnum) done() bool { 5170 return enum.i == enum.count 5171 } 5172 5173 func (enum *randomEnum) next() { 5174 enum.i++ 5175 enum.pos = (enum.pos + enum.inc) % enum.count 5176 } 5177 5178 func (enum *randomEnum) position() uint32 { 5179 return enum.pos 5180 } 5181 5182 func gcd(a, b uint32) uint32 { 5183 for b != 0 { 5184 a, b = b, a%b 5185 } 5186 return a 5187 }
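// Worked example of the coprime-stride enumeration above (a separate
// illustrative program, not part of this file): with count = 6 the usable
// strides are {1, 5}; starting at position 2 with stride 5 visits
// 2 1 0 5 4 3, that is, every P exactly once before wrapping around.
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		const count, inc = 6, 5 // gcd(inc, count) == 1
//		pos := 2
//		for i := 0; i < count; i++ {
//			fmt.Print(pos, " ") // prints 2 1 0 5 4 3
//			pos = (pos + inc) % count
//		}
//		fmt.Println()
//	}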