// qoobing.com/gomod/log@v1.2.8/logid-runtime-patch/unknown/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

var buildVersion = sys.TheVersion

// set using cmd/go/internal/modload.ModInfoProg
var modinfo string

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in the near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and hand it the P and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, and we would then need
//    to park it. Also, it would destroy locality of computation as we want to
//    preserve dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if there is an
// idle P and there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in the global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to the local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to the global run queue as we are not
// sloppy about thread unparking when submitting to the global queue. Also see comments
// for nmspinning manipulation.
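
// The parking/unparking behavior described above can be observed from outside
// the runtime with the scheduler trace debug option. A sketch (the program
// name is illustrative; the exact output fields vary by Go version):
//
//	GODEBUG=schedtrace=1000 ./myprog
//
// prints a line roughly every second, e.g.:
//
//	SCHED 1009ms: gomaxprocs=8 idleprocs=8 threads=10 spinningthreads=0 idlethreads=5 runqueue=0 [0 0 0 0 0 0 0 0]
//
// where spinningthreads corresponds to sched.nmspinning.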
var (
	m0           m
	g0           g
	raceprocctx0 uintptr
)

//go:linkname runtime_inittask runtime..inittask
var runtime_inittask initTask

//go:linkname main_inittask main..inittask
var main_inittask initTask

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

//go:linkname main_main main.main
func main_main()

// mainStarted indicates that the main M has started.
var mainStarted bool

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Allow newproc to start new Ms.
	mainStarted = true

	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
		systemstack(func() {
			newm(sysmon, nil)
		})
	}

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	doInit(&runtime_inittask) // must be before defer
	if nanotime() == 0 {
		throw("nanotime returning zero")
	}

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Record when the world started.
	runtimeInitTime = nanotime()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		// Start the template thread in case we enter Go from
		// a C-created thread and need to create a new thread.
		startTemplateThread()
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	doInit(&main_inittask)

	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
	fn()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issues 3934 and 20018.
	if atomic.Load(&runningPanicDefers) != 0 {
		// Running deferred functions should not take long.
		for c := 0; c < 1000; c++ {
			if atomic.Load(&runningPanicDefers) == 0 {
				break
			}
			Gosched()
		}
	}
	if atomic.Load(&panicking) != 0 {
		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}
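
// As the comment on lockOSThread above notes, user code can arrange for
// main.main to stay on the main OS thread by taking the lock in an init
// function. A minimal sketch using only the public API:
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// Keep main.main on the main OS thread; some C libraries
//		// (e.g. GUI event loops) require calls from that thread.
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// ... calls that must run on the main thread ...
//	}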
// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, waitReasonForceGGIdle, traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		// Time-triggered, fully concurrent.
		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	checkTimeouts()
	mcall(gosched_m)
}
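
// A minimal sketch of Gosched from user code: a busy loop that yields its P
// so other runnable goroutines are not starved (illustrative only):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			fmt.Println("other goroutine ran")
//			close(done)
//		}()
//		for {
//			select {
//			case <-done:
//				return
//			default:
//				runtime.Gosched() // yield instead of spinning hot
//			}
//		}
//	}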
// goschedguarded yields the processor like gosched, but also checks
// for forbidden states and opts out of the yield in those cases.
//go:nosplit
func goschedguarded() {
	mcall(goschedguarded_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between
// the call to gopark and the call to unlockf.
// Reason explains why the goroutine has been parked.
// It is displayed in stack traces and heap dumps.
// Reasons should be unique and descriptive.
// Do not re-use reasons, add new ones.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
	if reason != waitReasonSleep {
		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
	}
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = unlockf
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip, true)
	})
}
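
// The waitReason recorded by gopark is what goroutine dumps display, e.g.
// "[chan receive]" or "[sleep]". A minimal sketch that makes a parked
// goroutine visible (the sleep duration is arbitrary):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"time"
//	)
//
//	func main() {
//		ch := make(chan int)
//		go func() { <-ch }() // parks with a "chan receive" wait reason
//		time.Sleep(100 * time.Millisecond)
//		buf := make([]byte, 1<<16)
//		n := runtime.Stack(buf, true) // all=true includes parked goroutines
//		fmt.Printf("%s\n", buf[:n])
//	}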
//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.isSelect {
		throw("runtime: sudog with non-false isSelect")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// CAREFUL: In programs with plugins, funcPC can return different values
// for the same function (because there are actually multiple copies of
// the same function in the address space). To be safe, don't use the
// results of this function in any == expression. It is only safe to
// use the result as an address at which to start executing code.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return *(*uintptr)(efaceOf(&f).data)
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic(plainError("arg size to reflect.call more than 1GB"))
}

var badmorestackg0Msg = "fatal: morestack on g0\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackg0() {
	sp := stringStructOf(&badmorestackg0Msg)
	write(2, sp.str, int32(sp.len))
}

var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"

//go:nosplit
//go:nowritebarrierrec
func badmorestackgsignal() {
	sp := stringStructOf(&badmorestackgsignalMsg)
	write(2, sp.str, int32(sp.len))
}

//go:nosplit
func badctxt() {
	throw("ctxt != 0")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != 0 && gp.m.lockedg != 0
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
	_GoidCacheBatch = 16
)

// cpuinit extracts the environment variable GODEBUG from the environment on
// Unix-like operating systems and calls internal/cpu.Initialize.
func cpuinit() {
	const prefix = "GODEBUG="
	var env string

	switch GOOS {
	case "aix", "darwin", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
		cpu.DebugOptions = true

		// Similar to goenv_unix but extracts the environment value for
		// GODEBUG directly.
		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
		n := int32(0)
		for argv_index(argv, argc+1+n) != nil {
			n++
		}

		for i := int32(0); i < n; i++ {
			p := argv_index(argv, argc+1+i)
			s := *(*string)(unsafe.Pointer(&stringStruct{unsafe.Pointer(p), findnull(p)}))

			if hasPrefix(s, prefix) {
				env = gostring(p)[len(prefix):]
				break
			}
		}
	}

	cpu.Initialize(env)

	// Support cpu feature variables are used in code generated by the compiler
	// to guard execution of instructions that cannot be assumed to be always supported.
	x86HasPOPCNT = cpu.X86.HasPOPCNT
	x86HasSSE41 = cpu.X86.HasSSE41
	x86HasFMA = cpu.X86.HasFMA

	armHasVFPv4 = cpu.ARM.HasVFPv4

	arm64HasATOMICS = cpu.ARM64.HasATOMICS
}
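
// cpu.DebugOptions enables GODEBUG-based CPU feature toggles in internal/cpu
// on the Unix-like systems listed above. Assuming a feature option supported
// by the Go version in use, a binary can be asked to ignore a feature, e.g.:
//
//	GODEBUG=cpu.avx2=off ./myprog
//
// internal/cpu.Initialize parses such options from the env value extracted
// above and clears the corresponding feature flags.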
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx, raceprocctx0 = raceinit()
	}

	sched.maxmcount = 10000

	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	fastrandinit() // must run before mcommoninit
	mcommoninit(_g_.m)
	cpuinit()       // must run before alginit
	alginit()       // maps must not be used before this call
	modulesinit()   // provides activeModules
	typelinksinit() // uses maps, activeModules
	itabsinit()     // uses activeModules

	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	procs := ncpu
	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
		procs = n
	}
	if procresize(procs) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes. We can't do this until after
	// procresize because the write barrier needs a P.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
		for _, p := range allp {
			p.wbBuf.reset()
		}
	}

	if buildVersion == "" {
		// Condition should never trigger. This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
	if len(modinfo) == 1 {
		// Condition should never trigger. This code just serves
		// to ensure runtime·modinfo is kept in the resulting binary.
		modinfo = ""
	}
}
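
// schedinit reads GOMAXPROCS from the environment, defaulting to the CPU
// count; the same knob is exposed at run time through the public API. A
// minimal sketch:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// GOMAXPROCS(0) queries the setting without changing it.
//		fmt.Println("procs:", runtime.GOMAXPROCS(0))
//		prev := runtime.GOMAXPROCS(2) // request two Ps; returns old value
//		fmt.Println("was:", prev, "now:", runtime.GOMAXPROCS(0))
//	}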
func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
}

func checkmcount() {
	// sched lock is held
	if mcount() > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessarily unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	lock(&sched.lock)
	if sched.mnext+1 < sched.mnext {
		throw("runtime: thread ID overflow")
	}
	mp.id = sched.mnext
	sched.mnext++
	checkmcount()

	mp.fastrand[0] = uint32(int64Hash(uint64(mp.id), fastrandseed))
	mp.fastrand[1] = uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
	if mp.fastrand[0]|mp.fastrand[1] == 0 {
		mp.fastrand[1] = 1
	}

	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)

	// Allocate memory to hold a cgo traceback if the cgo call crashes.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
		mp.cgoCallers = new(cgoCallers)
	}
}

var fastrandseed uintptr

func fastrandinit() {
	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
	getRandomData(s)
}

// Mark gp ready to run.
func ready(gp *g, traceskip int, next bool) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, next)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}
	releasem(mp)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
const freezeStopWait = 0x7fffffff

// freezing is set to non-zero if the runtime is trying to freeze the
// world.
var freezing uint32

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	atomic.Store(&freezing, 1)
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall,
		_Gscanpreempted:
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Grunning,
		_Gwaiting,
		_Gsyscall:
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	// See https://golang.org/cl/21503 for justification of the yield delay.
	const yieldDelay = 5 * 1000
	var nextYield int64

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			throw("casgstatus: waiting for Gwaiting but is Grunnable")
		}
		if i == 0 {
			nextYield = nanotime() + yieldDelay
		}
		if nanotime() < nextYield {
			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
				procyield(1)
			}
		} else {
			osyield()
			nextYield = nanotime() + yieldDelay/2
		}
	}
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
//
// TODO(austin): This is the only status operation that both changes
// the status and locks the _Gscan bit. Rethink this.
func casGToPreemptScan(gp *g, old, new uint32) {
	if old != _Grunning || new != _Gscan|_Gpreempted {
		throw("bad g transition")
	}
	for !atomic.Cas(&gp.atomicstatus, _Grunning, _Gscan|_Gpreempted) {
	}
}

// casGFromPreempted attempts to transition gp from _Gpreempted to
// _Gwaiting. If successful, the caller is responsible for
// re-scheduling gp.
func casGFromPreempted(gp *g, old, new uint32) bool {
	if old != _Gpreempted || new != _Gwaiting {
		throw("bad g transition")
	}
	return atomic.Cas(&gp.atomicstatus, _Gpreempted, _Gwaiting)
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and recording reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	semacquire(&worldsema)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(func() { startTheWorldWithSema(false) })
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}
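
// Stop-the-world pauses are user-visible through public APIs that trigger
// them, such as runtime.GC and runtime.ReadMemStats. A rough sketch (the
// timing includes the concurrent phases of the GC cycle, not just the STW
// parts):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"time"
//	)
//
//	func main() {
//		start := time.Now()
//		runtime.GC() // a full GC cycle, including brief STW phases
//		fmt.Println("GC cycle took:", time.Since(start))
//
//		var st runtime.MemStats
//		runtime.ReadMemStats(&st) // also briefly stops the world
//		fmt.Println("cumulative GC STW pause ns:", st.PauseTotalNs)
//	}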
// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should call stopTheWorldWithSema on the
// system stack:
//
//	semacquire(&worldsema, 0)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	sched.stopwait = gomaxprocs
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}

	// sanity checks
	bad := ""
	if sched.stopwait != 0 {
		bad = "stopTheWorld: not stopped (stopwait != 0)"
	} else {
		for _, p := range allp {
			if p.status != _Pgcstop {
				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
			}
		}
	}
	if atomic.Load(&freezing) != 0 {
		// Some other thread is panicking. This can cause the
		// sanity checks above to fail if the panic happens in
		// the signal handler on a stopped thread. Either way,
		// we should halt this thread.
		lock(&deadlock)
		lock(&deadlock)
	}
	if bad != "" {
		throw(bad)
	}
}

func startTheWorldWithSema(emitTraceEvent bool) int64 {
	mp := acquirem() // disable preemption because it can be holding p in a local var
	if netpollinited() {
		list := netpoll(0) // non-blocking
		injectglist(&list)
	}
	lock(&sched.lock)

	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P. Do not start another M below.
			newm(nil, p)
		}
	}

	// Capture start-the-world time before doing clean-up tasks.
	startTime := nanotime()
	if emitTraceEvent {
		traceGCSTWDone()
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	releasem(mp)

	return startTime
}

// mstart is the entry-point for new Ms.
//
// This must not split the stack because we may not even have stack
// bounds set up yet.
//
// May run during STW (because it doesn't have a P yet), so write
// barriers are not allowed.
//
//go:nosplit
//go:nowritebarrierrec
func mstart() {
	_g_ := getg()

	osStack := _g_.stack.lo == 0
	if osStack {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		// minit may update the stack bounds.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guard so that we can start calling regular
	// Go code.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	// This is the g0, so we can also call go:systemstack
	// functions, which check stackguard1.
	_g_.stackguard1 = _g_.stackguard0
	mstart1()

	// Exit this thread.
	switch GOOS {
	case "windows", "solaris", "illumos", "plan9", "darwin", "aix":
		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
		// the stack, but put it in _g_.stack before mstart,
		// so the logic above hasn't set osStack yet.
		osStack = true
	}
	mexit(osStack)
}

func mstart1() {
	_g_ := getg()

	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record the caller for use as the top of stack in mcall and
	// for terminating the thread.
	// We're never coming back to mstart1 after we call schedule,
	// so other calls can reuse the current frame.
	save(getcallerpc(), getcallersp())
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		mstartm0()
	}

	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m != &m0 {
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// mstartm0 implements part of mstart1 that only runs on the m0.
//
// Write barriers are allowed here because we know the GC can't be
// running yet, so they'll be no-ops.
//
//go:yeswritebarrierrec
func mstartm0() {
	// Create an extra M for callbacks on threads not created by Go.
	// An extra M is also needed on Windows for callbacks created by
	// syscall.NewCallback. See issue #6751 for details.
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		cgoHasExtraM = true
		newextram()
	}
	initsig(false)
}

// mexit tears down and exits the current thread.
//
// Don't call this directly to exit the thread, since it must run at
// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
// unwind the stack to the point that exits the thread.
//
// It is entered with m.p != nil, so write barriers are allowed. It
// will release the P before exiting.
//
//go:yeswritebarrierrec
func mexit(osStack bool) {
	g := getg()
	m := g.m

	if m == &m0 {
		// This is the main thread. Just wedge it.
		//
		// On Linux, exiting the main thread puts the process
		// into a non-waitable zombie state. On Plan 9,
		// exiting the main thread unblocks wait even though
		// other threads are still running. On Solaris we can
		// neither exitThread nor return from mstart. Other
		// bad things probably happen on other platforms.
		//
		// We could try to clean up this M more before wedging
		// it, but that complicates signal handling.
		handoffp(releasep())
		lock(&sched.lock)
		sched.nmfreed++
		checkdead()
		unlock(&sched.lock)
		notesleep(&m.park)
		throw("locked m0 woke up")
	}

	sigblock()
	unminit()

	// Free the gsignal stack.
	if m.gsignal != nil {
		stackfree(m.gsignal.stack)
		// On some platforms, when calling into VDSO (e.g. nanotime)
		// we store our g on the gsignal stack, if there is one.
		// Now the stack is freed, unlink it from the m, so we
		// won't write to it when calling VDSO code.
		m.gsignal = nil
	}

	// Remove m from allm.
	lock(&sched.lock)
	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
		if *pprev == m {
			*pprev = m.alllink
			goto found
		}
	}
	throw("m not found in allm")
found:
	if !osStack {
		// Delay reaping m until it's done with the stack.
		//
		// If this is using an OS stack, the OS will free it
		// so there's no need for reaping.
		atomic.Store(&m.freeWait, 1)
		// Put m on the free list, though it will not be reaped until
		// freeWait is 0. Note that the free list must not be linked
		// through alllink because some functions walk allm without
		// locking, so may be using alllink.
		m.freelink = sched.freem
		sched.freem = m
	}
	unlock(&sched.lock)

	// Release the P.
	handoffp(releasep())
	// After this point we must not have write barriers.

	// Invoke the deadlock detector. This must happen after
	// handoffp because it may have started a new M to take our
	// P's work.
	lock(&sched.lock)
	sched.nmfreed++
	checkdead()
	unlock(&sched.lock)

	if osStack {
		// Return from mstart and let the system thread
		// library free the g0 stack and terminate the thread.
		return
	}

	// mstart is the thread's entry point, so there's nothing to
	// return to. Exit the thread directly. exitThread will clear
	// m.freeWait when it's done with the stack and the m can be
	// reaped.
	exitThread(&m.freeWait)
}
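
// mexit is the internal path behind a documented public behavior: if a
// goroutine locked to its thread with runtime.LockOSThread exits without
// unlocking, the runtime terminates that OS thread rather than reusing it.
// A minimal sketch:
//
//	package main
//
//	import (
//		"runtime"
//		"sync"
//	)
//
//	func main() {
//		var wg sync.WaitGroup
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			runtime.LockOSThread()
//			// Returning while still locked makes the runtime throw
//			// this thread away instead of returning it to the pool.
//		}()
//		wg.Wait()
//	}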
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for _, p := range allp {
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for _, p := range allp {
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//	if getg().m.p.runSafePointFn != 0 {
//		runSafePointFn()
//	}
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

type cgothreadstart struct {
	g   guintptr
	tls *uint64
	fn  unsafe.Pointer
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
//
// This function is allowed to have write barriers even if the caller
// isn't because it borrows _p_.
//
//go:yeswritebarrierrec
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	acquirem() // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}

	// Release the free M list. We need to do this somewhere and
	// this may free up a stack we can use.
	if sched.freem != nil {
		lock(&sched.lock)
		var newList *m
		for freem := sched.freem; freem != nil; {
			if freem.freeWait != 0 {
				next := freem.freelink
				freem.freelink = newList
				newList = freem
				freem = next
				continue
			}
			stackfree(freem.g0.stack)
			freem = freem.freelink
		}
		sched.freem = newList
		unlock(&sched.lock)
	}

	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
	// Windows and Plan 9 will layout sched stack on OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" || GOOS == "plan9" || GOOS == "darwin" {
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	releasem(_g_.m)

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via Casuintptr) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can also happen on Windows if a global ctor uses a
		// callback created by syscall.NewCallback. See issue #6751
		// for details.
		//
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	extraMCount--
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()

	// mp.curg is now a real goroutine.
	casgstatus(mp.curg, _Gdead, _Gsyscall)
	atomic.Xadd(&sched.ngsys, -1)
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates m's and puts them on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	c := atomic.Xchg(&extraMWaiters, 0)
	if c > 0 {
		for i := uint32(0); i < c; i++ {
			oneNewExtraM()
		}
	} else {
		// Make sure there is at least one extra M.
		mp := lockextra(true)
		unlockextra(mp)
		if mp == nil {
			oneNewExtraM()
		}
	}
}

// oneNewExtraM allocates an m and puts it on the extra list.
func oneNewExtraM() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as _Gidle. Change to _Gdead before
	// adding to allg where GC can see it. We use _Gdead to hide
	// this from tracebacks and stack scans since it isn't a
	// "real" goroutine until needm grabs it.
	casgstatus(gp, _Gidle, _Gdead)
	gp.m = mp
	mp.curg = gp
	mp.lockedInt++
	mp.lockedg.set(gp)
	gp.lockedm.set(mp)
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
	}
	// put on allg for garbage collector
	allgadd(gp)

	// gp is now on the allg list, but we don't want it to be
	// counted by gcount. It would be more "proper" to increment
	// sched.ngfree, but that requires locking. Incrementing ngsys
	// has the same effect.
	atomic.Xadd(&sched.ngsys, +1)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	extraMCount++
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Return mp.curg to dead state.
	casgstatus(mp.curg, _Gsyscall, _Gdead)
	mp.curg.preemptStop = false
	atomic.Xadd(&sched.ngsys, +1)

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock()
	unminit()

	mnext := lockextra(true)
	extraMCount++
	mp.schedlink.set(mnext)

	setg(nil)

	// Commit the release of mp.
	unlockextra(mp)

	msigrestore(sigmask)
}
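
// needm/dropm run when C code calls into Go on a thread the runtime did not
// create. A minimal cgo sketch of that path (function names are illustrative;
// note that cgo requires the C definitions to live in a separate .c file when
// //export is used in the same Go file):
//
//	package main
//
//	// extern void GoCallback(void);
//	// void startThread(void); // defined in a separate .c file: spawns a
//	//                         // pthread that calls GoCallback() and joins it
//	import "C"
//	import "fmt"
//
//	//export GoCallback
//	func GoCallback() {
//		// Runs on a C-created thread: the runtime picks up an extra M
//		// via needm and returns it with dropm when the callback ends.
//		fmt.Println("called back into Go")
//	}
//
//	func main() { C.startThread() }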
// A helper function for EnsureDropM.
func getm() uintptr {
	return uintptr(unsafe.Pointer(getg().m))
}

var extram uintptr
var extraMCount uint32 // Protected by lockextra
var extraMWaiters uint32

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	const locked = 1

	incr := false
	for {
		old := atomic.Loaduintptr(&extram)
		if old == locked {
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			if !incr {
				// Add 1 to the number of threads
				// waiting for an M.
				// This is cleared by newextram.
				atomic.Xadd(&extraMWaiters, 1)
				incr = true
			}
			usleep(1)
			continue
		}
		if atomic.Casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

//go:nosplit
func unlockextra(mp *m) {
	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
}
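
// lockextra treats the extram head word as a spin lock by swapping in the
// sentinel value 1. The same pattern in ordinary Go, using sync/atomic
// (names here are illustrative, not from the runtime):
//
//	package spinlock
//
//	import (
//		"runtime"
//		"sync/atomic"
//	)
//
//	const locked uintptr = 1 // sentinel; real heads are pointers, never 1
//
//	var head uintptr // holds a pointer value or the sentinel
//
//	func lockHead() uintptr {
//		for {
//			old := atomic.LoadUintptr(&head)
//			if old != locked && atomic.CompareAndSwapUintptr(&head, old, locked) {
//				return old // caller now owns the list
//			}
//			runtime.Gosched() // stand-in for the runtime's osyield
//		}
//	}
//
//	func unlockHead(newHead uintptr) {
//		atomic.StoreUintptr(&head, newHead) // storing a valid head unlocks
//	}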
1719 lock(&newmHandoff.lock) 1720 if newmHandoff.haveTemplateThread == 0 { 1721 throw("on a locked thread with no template thread") 1722 } 1723 mp.schedlink = newmHandoff.newm 1724 newmHandoff.newm.set(mp) 1725 if newmHandoff.waiting { 1726 newmHandoff.waiting = false 1727 notewakeup(&newmHandoff.wake) 1728 } 1729 unlock(&newmHandoff.lock) 1730 return 1731 } 1732 newm1(mp) 1733 } 1734 1735 func newm1(mp *m) { 1736 if iscgo { 1737 var ts cgothreadstart 1738 if _cgo_thread_start == nil { 1739 throw("_cgo_thread_start missing") 1740 } 1741 ts.g.set(mp.g0) 1742 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1743 ts.fn = unsafe.Pointer(funcPC(mstart)) 1744 if msanenabled { 1745 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1746 } 1747 execLock.rlock() // Prevent process clone. 1748 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1749 execLock.runlock() 1750 return 1751 } 1752 execLock.rlock() // Prevent process clone. 1753 newosproc(mp) 1754 execLock.runlock() 1755 } 1756 1757 // startTemplateThread starts the template thread if it is not already 1758 // running. 1759 // 1760 // The calling thread must itself be in a known-good state. 1761 func startTemplateThread() { 1762 if GOARCH == "wasm" { // no threads on wasm yet 1763 return 1764 } 1765 1766 // Disable preemption to guarantee that the template thread will be 1767 // created before a park once haveTemplateThread is set. 1768 mp := acquirem() 1769 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { 1770 releasem(mp) 1771 return 1772 } 1773 newm(templateThread, nil) 1774 releasem(mp) 1775 } 1776 1777 // templateThread is a thread in a known-good state that exists solely 1778 // to start new threads in known-good states when the calling thread 1779 // may not be in a good state. 1780 // 1781 // Many programs never need this, so templateThread is started lazily 1782 // when we first enter a state that might lead to running on a thread 1783 // in an unknown state. 1784 // 1785 // templateThread runs on an M without a P, so it must not have write 1786 // barriers. 1787 // 1788 //go:nowritebarrierrec 1789 func templateThread() { 1790 lock(&sched.lock) 1791 sched.nmsys++ 1792 checkdead() 1793 unlock(&sched.lock) 1794 1795 for { 1796 lock(&newmHandoff.lock) 1797 for newmHandoff.newm != 0 { 1798 newm := newmHandoff.newm.ptr() 1799 newmHandoff.newm = 0 1800 unlock(&newmHandoff.lock) 1801 for newm != nil { 1802 next := newm.schedlink.ptr() 1803 newm.schedlink = 0 1804 newm1(newm) 1805 newm = next 1806 } 1807 lock(&newmHandoff.lock) 1808 } 1809 newmHandoff.waiting = true 1810 noteclear(&newmHandoff.wake) 1811 unlock(&newmHandoff.lock) 1812 notesleep(&newmHandoff.wake) 1813 } 1814 } 1815 1816 // Stops execution of the current m until new work is available. 1817 // Returns with acquired P. 1818 func stopm() { 1819 _g_ := getg() 1820 1821 if _g_.m.locks != 0 { 1822 throw("stopm holding locks") 1823 } 1824 if _g_.m.p != 0 { 1825 throw("stopm holding p") 1826 } 1827 if _g_.m.spinning { 1828 throw("stopm spinning") 1829 } 1830 1831 lock(&sched.lock) 1832 mput(_g_.m) 1833 unlock(&sched.lock) 1834 notesleep(&_g_.m.park) 1835 noteclear(&_g_.m.park) 1836 acquirep(_g_.m.nextp.ptr()) 1837 _g_.m.nextp = 0 1838 } 1839 1840 func mspinning() { 1841 // startm's caller incremented nmspinning. Set the new M's spinning. 1842 getg().m.spinning = true 1843 } 1844 1845 // Schedules some M to run the p (creates an M if necessary). 1846 // If p==nil, tries to get an idle P, if no idle P's does nothing. 
1847 // May run with m.p==nil, so write barriers are not allowed. 1848 // If spinning is set, the caller has incremented nmspinning and startm will 1849 // either decrement nmspinning or set m.spinning in the newly started M. 1850 //go:nowritebarrierrec 1851 func startm(_p_ *p, spinning bool) { 1852 lock(&sched.lock) 1853 if _p_ == nil { 1854 _p_ = pidleget() 1855 if _p_ == nil { 1856 unlock(&sched.lock) 1857 if spinning { 1858 // The caller incremented nmspinning, but there are no idle Ps, 1859 // so it's okay to just undo the increment and give up. 1860 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 1861 throw("startm: negative nmspinning") 1862 } 1863 } 1864 return 1865 } 1866 } 1867 mp := mget() 1868 unlock(&sched.lock) 1869 if mp == nil { 1870 var fn func() 1871 if spinning { 1872 // The caller incremented nmspinning, so set m.spinning in the new M. 1873 fn = mspinning 1874 } 1875 newm(fn, _p_) 1876 return 1877 } 1878 if mp.spinning { 1879 throw("startm: m is spinning") 1880 } 1881 if mp.nextp != 0 { 1882 throw("startm: m has p") 1883 } 1884 if spinning && !runqempty(_p_) { 1885 throw("startm: p has runnable gs") 1886 } 1887 // The caller incremented nmspinning, so set m.spinning in the new M. 1888 mp.spinning = spinning 1889 mp.nextp.set(_p_) 1890 notewakeup(&mp.park) 1891 } 1892 1893 // Hands off P from syscall or locked M. 1894 // Always runs without a P, so write barriers are not allowed. 1895 //go:nowritebarrierrec 1896 func handoffp(_p_ *p) { 1897 // handoffp must start an M in any situation where 1898 // findrunnable would return a G to run on _p_. 1899 1900 // if it has local work, start it straight away 1901 if !runqempty(_p_) || sched.runqsize != 0 { 1902 startm(_p_, false) 1903 return 1904 } 1905 // if it has GC work, start it straight away 1906 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 1907 startm(_p_, false) 1908 return 1909 } 1910 // no local work, check that there are no spinning/idle M's, 1911 // otherwise our help is not required 1912 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 1913 startm(_p_, true) 1914 return 1915 } 1916 lock(&sched.lock) 1917 if sched.gcwaiting != 0 { 1918 _p_.status = _Pgcstop 1919 sched.stopwait-- 1920 if sched.stopwait == 0 { 1921 notewakeup(&sched.stopnote) 1922 } 1923 unlock(&sched.lock) 1924 return 1925 } 1926 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 1927 sched.safePointFn(_p_) 1928 sched.safePointWait-- 1929 if sched.safePointWait == 0 { 1930 notewakeup(&sched.safePointNote) 1931 } 1932 } 1933 if sched.runqsize != 0 { 1934 unlock(&sched.lock) 1935 startm(_p_, false) 1936 return 1937 } 1938 // If this is the last running P and nobody is polling network, 1939 // need to wakeup another M to poll network. 1940 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 1941 unlock(&sched.lock) 1942 startm(_p_, false) 1943 return 1944 } 1945 if when := nobarrierWakeTime(_p_); when != 0 { 1946 wakeNetPoller(when) 1947 } 1948 pidleput(_p_) 1949 unlock(&sched.lock) 1950 } 1951 1952 // Tries to add one more P to execute G's. 1953 // Called when a G is made runnable (newproc, ready). 1954 func wakep() { 1955 // be conservative about spinning threads 1956 if !atomic.Cas(&sched.nmspinning, 0, 1) { 1957 return 1958 } 1959 startm(nil, true) 1960 } 1961 1962 // Stops execution of the current m that is locked to a g until the g is runnable again. 1963 // Returns with acquired P. 
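// Editor's note (illustrative, not part of the original file): an M becomes
// locked when user code pins a goroutine with runtime.LockOSThread. While the
// locked g is not runnable, its M parks in the function below and the P is
// handed off to another M via handoffp. For example:
//
//	package main
//
//	import "runtime"
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			defer close(done)
//			// Pin this goroutine to its OS thread, e.g. for C
//			// libraries or syscalls that use thread-local state.
//			runtime.LockOSThread()
//			defer runtime.UnlockOSThread()
//			// ... thread-sensitive work ...
//		}()
//		<-done
//	}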
1964 func stoplockedm() { 1965 _g_ := getg() 1966 1967 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 1968 throw("stoplockedm: inconsistent locking") 1969 } 1970 if _g_.m.p != 0 { 1971 // Schedule another M to run this p. 1972 _p_ := releasep() 1973 handoffp(_p_) 1974 } 1975 incidlelocked(1) 1976 // Wait until another thread schedules lockedg again. 1977 notesleep(&_g_.m.park) 1978 noteclear(&_g_.m.park) 1979 status := readgstatus(_g_.m.lockedg.ptr()) 1980 if status&^_Gscan != _Grunnable { 1981 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 1982 dumpgstatus(_g_) 1983 throw("stoplockedm: not runnable") 1984 } 1985 acquirep(_g_.m.nextp.ptr()) 1986 _g_.m.nextp = 0 1987 } 1988 1989 // Schedules the locked m to run the locked gp. 1990 // May run during STW, so write barriers are not allowed. 1991 //go:nowritebarrierrec 1992 func startlockedm(gp *g) { 1993 _g_ := getg() 1994 1995 mp := gp.lockedm.ptr() 1996 if mp == _g_.m { 1997 throw("startlockedm: locked to me") 1998 } 1999 if mp.nextp != 0 { 2000 throw("startlockedm: m has p") 2001 } 2002 // directly handoff current P to the locked m 2003 incidlelocked(-1) 2004 _p_ := releasep() 2005 mp.nextp.set(_p_) 2006 notewakeup(&mp.park) 2007 stopm() 2008 } 2009 2010 // Stops the current m for stopTheWorld. 2011 // Returns when the world is restarted. 2012 func gcstopm() { 2013 _g_ := getg() 2014 2015 if sched.gcwaiting == 0 { 2016 throw("gcstopm: not waiting for gc") 2017 } 2018 if _g_.m.spinning { 2019 _g_.m.spinning = false 2020 // OK to just drop nmspinning here, 2021 // startTheWorld will unpark threads as necessary. 2022 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2023 throw("gcstopm: negative nmspinning") 2024 } 2025 } 2026 _p_ := releasep() 2027 lock(&sched.lock) 2028 _p_.status = _Pgcstop 2029 sched.stopwait-- 2030 if sched.stopwait == 0 { 2031 notewakeup(&sched.stopnote) 2032 } 2033 unlock(&sched.lock) 2034 stopm() 2035 } 2036 2037 // Schedules gp to run on the current M. 2038 // If inheritTime is true, gp inherits the remaining time in the 2039 // current time slice. Otherwise, it starts a new time slice. 2040 // Never returns. 2041 // 2042 // Write barriers are allowed because this is called immediately after 2043 // acquiring a P in several places. 2044 // 2045 //go:yeswritebarrierrec 2046 func execute(gp *g, inheritTime bool) { 2047 _g_ := getg() 2048 2049 // Assign gp.m before entering _Grunning so running Gs have an 2050 // M. 2051 _g_.m.curg = gp 2052 gp.m = _g_.m 2053 casgstatus(gp, _Grunnable, _Grunning) 2054 gp.waitsince = 0 2055 gp.preempt = false 2056 gp.stackguard0 = gp.stack.lo + _StackGuard 2057 if !inheritTime { 2058 _g_.m.p.ptr().schedtick++ 2059 } 2060 2061 // Check whether the profiler needs to be turned on or off. 2062 hz := sched.profilehz 2063 if _g_.m.profilehz != hz { 2064 setThreadCPUProfiler(hz) 2065 } 2066 2067 if trace.enabled { 2068 // GoSysExit has to happen when we have a P, but before GoStart. 2069 // So we emit it here. 2070 if gp.syscallsp != 0 && gp.sysblocktraced { 2071 traceGoSysExit(gp.sysexitticks) 2072 } 2073 traceGoStart() 2074 } 2075 2076 gogo(&gp.sched) 2077 } 2078 2079 // Finds a runnable goroutine to execute. 2080 // Tries to steal from other P's, get g from local or global queue, poll network. 2081 func findrunnable() (gp *g, inheritTime bool) { 2082 _g_ := getg() 2083 2084 // The conditions here and in handoffp must agree: if 2085 // findrunnable would return a G to run, handoffp must start 2086 // an M. 
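// Editor's note (illustrative summary, not part of the original file): the
// search below proceeds roughly in this order, cheapest first:
//
//	1. timers, finalizer wakeup, cgo yield
//	2. local run queue (runqget)
//	3. global run queue (globrunqget)
//	4. non-blocking netpoll
//	5. work (and timer) stealing from other Ps
//	6. idle-priority GC mark work
//	7. re-check all queues, then block in netpoll or stopm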
2087 2088 top: 2089 _p_ := _g_.m.p.ptr() 2090 if sched.gcwaiting != 0 { 2091 gcstopm() 2092 goto top 2093 } 2094 if _p_.runSafePointFn != 0 { 2095 runSafePointFn() 2096 } 2097 2098 now, pollUntil, _ := checkTimers(_p_, 0) 2099 2100 if fingwait && fingwake { 2101 if gp := wakefing(); gp != nil { 2102 ready(gp, 0, true) 2103 } 2104 } 2105 if *cgo_yield != nil { 2106 asmcgocall(*cgo_yield, nil) 2107 } 2108 2109 // local runq 2110 if gp, inheritTime := runqget(_p_); gp != nil { 2111 return gp, inheritTime 2112 } 2113 2114 // global runq 2115 if sched.runqsize != 0 { 2116 lock(&sched.lock) 2117 gp := globrunqget(_p_, 0) 2118 unlock(&sched.lock) 2119 if gp != nil { 2120 return gp, false 2121 } 2122 } 2123 2124 // Poll network. 2125 // This netpoll is only an optimization before we resort to stealing. 2126 // We can safely skip it if there are no waiters or a thread is blocked 2127 // in netpoll already. If there is any kind of logical race with that 2128 // blocked thread (e.g. it has already returned from netpoll, but does 2129 // not set lastpoll yet), this thread will do blocking netpoll below 2130 // anyway. 2131 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { 2132 if list := netpoll(0); !list.empty() { // non-blocking 2133 gp := list.pop() 2134 injectglist(&list) 2135 casgstatus(gp, _Gwaiting, _Grunnable) 2136 if trace.enabled { 2137 traceGoUnpark(gp, 0) 2138 } 2139 return gp, false 2140 } 2141 } 2142 2143 // Steal work from other P's. 2144 procs := uint32(gomaxprocs) 2145 ranTimer := false 2146 // If number of spinning M's >= number of busy P's, block. 2147 // This is necessary to prevent excessive CPU consumption 2148 // when GOMAXPROCS>>1 but the program parallelism is low. 2149 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { 2150 goto stop 2151 } 2152 if !_g_.m.spinning { 2153 _g_.m.spinning = true 2154 atomic.Xadd(&sched.nmspinning, 1) 2155 } 2156 for i := 0; i < 4; i++ { 2157 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { 2158 if sched.gcwaiting != 0 { 2159 goto top 2160 } 2161 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 2162 p2 := allp[enum.position()] 2163 if _p_ == p2 { 2164 continue 2165 } 2166 if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil { 2167 return gp, false 2168 } 2169 2170 // Consider stealing timers from p2. 2171 // This call to checkTimers is the only place where 2172 // we hold a lock on a different P's timers. 2173 // Lock contention can be a problem here, so avoid 2174 // grabbing the lock if p2 is running and not marked 2175 // for preemption. If p2 is running and not being 2176 // preempted we assume it will handle its own timers. 2177 if i > 2 && shouldStealTimers(p2) { 2178 tnow, w, ran := checkTimers(p2, now) 2179 now = tnow 2180 if w != 0 && (pollUntil == 0 || w < pollUntil) { 2181 pollUntil = w 2182 } 2183 if ran { 2184 // Running the timers may have 2185 // made an arbitrary number of G's 2186 // ready and added them to this P's 2187 // local run queue. That invalidates 2188 // the assumption of runqsteal 2189 // that it always has room to add 2190 // stolen G's. So check now if there 2191 // is a local G to run. 2192 if gp, inheritTime := runqget(_p_); gp != nil { 2193 return gp, inheritTime 2194 } 2195 ranTimer = true 2196 } 2197 } 2198 } 2199 } 2200 if ranTimer { 2201 // Running a timer may have made some goroutine ready. 2202 goto top 2203 } 2204 2205 stop: 2206 2207 // We have nothing to do.
If we're in the GC mark phase, can 2208 // safely scan and blacken objects, and have work to do, run 2209 // idle-time marking rather than give up the P. 2210 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 2211 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2212 gp := _p_.gcBgMarkWorker.ptr() 2213 casgstatus(gp, _Gwaiting, _Grunnable) 2214 if trace.enabled { 2215 traceGoUnpark(gp, 0) 2216 } 2217 return gp, false 2218 } 2219 2220 delta := int64(-1) 2221 if pollUntil != 0 { 2222 // checkTimers ensures that pollUntil > now. 2223 delta = pollUntil - now 2224 } 2225 2226 // wasm only: 2227 // If a callback returned and no other goroutine is awake, 2228 // then pause execution until a callback was triggered. 2229 if beforeIdle(delta) { 2230 // At least one goroutine got woken. 2231 goto top 2232 } 2233 2234 // Before we drop our P, make a snapshot of the allp slice, 2235 // which can change underfoot once we no longer block 2236 // safe-points. We don't need to snapshot the contents because 2237 // everything up to cap(allp) is immutable. 2238 allpSnapshot := allp 2239 2240 // return P and block 2241 lock(&sched.lock) 2242 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2243 unlock(&sched.lock) 2244 goto top 2245 } 2246 if sched.runqsize != 0 { 2247 gp := globrunqget(_p_, 0) 2248 unlock(&sched.lock) 2249 return gp, false 2250 } 2251 if releasep() != _p_ { 2252 throw("findrunnable: wrong p") 2253 } 2254 pidleput(_p_) 2255 unlock(&sched.lock) 2256 2257 // Delicate dance: thread transitions from spinning to non-spinning state, 2258 // potentially concurrently with submission of new goroutines. We must 2259 // drop nmspinning first and then check all per-P queues again (with 2260 // #StoreLoad memory barrier in between). If we do it the other way around, 2261 // another thread can submit a goroutine after we've checked all run queues 2262 // but before we drop nmspinning; as a result nobody will unpark a thread 2263 // to run the goroutine. 2264 // If we discover new work below, we need to restore m.spinning as a signal 2265 // for resetspinning to unpark a new worker thread (because there can be more 2266 // than one starving goroutine). However, if after discovering new work 2267 // we also observe no idle Ps, it is OK to just park the current thread: 2268 // the system is fully loaded so no spinning threads are required. 2269 // Also see "Worker thread parking/unparking" comment at the top of the file. 2270 wasSpinning := _g_.m.spinning 2271 if _g_.m.spinning { 2272 _g_.m.spinning = false 2273 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2274 throw("findrunnable: negative nmspinning") 2275 } 2276 } 2277 2278 // check all runqueues once again 2279 for _, _p_ := range allpSnapshot { 2280 if !runqempty(_p_) { 2281 lock(&sched.lock) 2282 _p_ = pidleget() 2283 unlock(&sched.lock) 2284 if _p_ != nil { 2285 acquirep(_p_) 2286 if wasSpinning { 2287 _g_.m.spinning = true 2288 atomic.Xadd(&sched.nmspinning, 1) 2289 } 2290 goto top 2291 } 2292 break 2293 } 2294 } 2295 2296 // Check for idle-priority GC work again. 2297 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2298 lock(&sched.lock) 2299 _p_ = pidleget() 2300 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2301 pidleput(_p_) 2302 _p_ = nil 2303 } 2304 unlock(&sched.lock) 2305 if _p_ != nil { 2306 acquirep(_p_) 2307 if wasSpinning { 2308 _g_.m.spinning = true 2309 atomic.Xadd(&sched.nmspinning, 1) 2310 } 2311 // Go back to idle GC check.
2312 goto stop 2313 } 2314 } 2315 2316 // poll network 2317 if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2318 atomic.Store64(&sched.pollUntil, uint64(pollUntil)) 2319 if _g_.m.p != 0 { 2320 throw("findrunnable: netpoll with p") 2321 } 2322 if _g_.m.spinning { 2323 throw("findrunnable: netpoll with spinning") 2324 } 2325 if faketime != 0 { 2326 // When using fake time, just poll. 2327 delta = 0 2328 } 2329 list := netpoll(delta) // block until new work is available 2330 atomic.Store64(&sched.pollUntil, 0) 2331 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2332 if faketime != 0 && list.empty() { 2333 // Using fake time and nothing is ready; stop M. 2334 // When all M's stop, checkdead will call timejump. 2335 stopm() 2336 goto top 2337 } 2338 lock(&sched.lock) 2339 _p_ = pidleget() 2340 unlock(&sched.lock) 2341 if _p_ == nil { 2342 injectglist(&list) 2343 } else { 2344 acquirep(_p_) 2345 if !list.empty() { 2346 gp := list.pop() 2347 injectglist(&list) 2348 casgstatus(gp, _Gwaiting, _Grunnable) 2349 if trace.enabled { 2350 traceGoUnpark(gp, 0) 2351 } 2352 return gp, false 2353 } 2354 if wasSpinning { 2355 _g_.m.spinning = true 2356 atomic.Xadd(&sched.nmspinning, 1) 2357 } 2358 goto top 2359 } 2360 } else if pollUntil != 0 && netpollinited() { 2361 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 2362 if pollerPollUntil == 0 || pollerPollUntil > pollUntil { 2363 netpollBreak() 2364 } 2365 } 2366 stopm() 2367 goto top 2368 } 2369 2370 // pollWork reports whether there is non-background work this P could 2371 // be doing. This is a fairly lightweight check to be used for 2372 // background work loops, like idle GC. It checks a subset of the 2373 // conditions checked by the actual scheduler. 2374 func pollWork() bool { 2375 if sched.runqsize != 0 { 2376 return true 2377 } 2378 p := getg().m.p.ptr() 2379 if !runqempty(p) { 2380 return true 2381 } 2382 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2383 if list := netpoll(0); !list.empty() { 2384 injectglist(&list) 2385 return true 2386 } 2387 } 2388 return false 2389 } 2390 2391 // wakeNetPoller wakes up the thread sleeping in the network poller, 2392 // if there is one, and if it isn't going to wake up anyhow before 2393 // the when argument. 2394 func wakeNetPoller(when int64) { 2395 if atomic.Load64(&sched.lastpoll) == 0 { 2396 // In findrunnable we ensure that when polling the pollUntil 2397 // field is either zero or the time to which the current 2398 // poll is expected to run. This can have a spurious wakeup 2399 // but should never miss a wakeup. 2400 pollerPollUntil := int64(atomic.Load64(&sched.pollUntil)) 2401 if pollerPollUntil == 0 || pollerPollUntil > when { 2402 netpollBreak() 2403 } 2404 } 2405 } 2406 2407 func resetspinning() { 2408 _g_ := getg() 2409 if !_g_.m.spinning { 2410 throw("resetspinning: not a spinning m") 2411 } 2412 _g_.m.spinning = false 2413 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2414 if int32(nmspinning) < 0 { 2415 throw("findrunnable: negative nmspinning") 2416 } 2417 // M wakeup policy is deliberately somewhat conservative, so check if we 2418 // need to wakeup another P here. See "Worker thread parking/unparking" 2419 // comment at the top of the file for details. 2420 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2421 wakep() 2422 } 2423 } 2424 2425 // Injects the list of runnable G's into the scheduler and clears glist. 2426 // Can run concurrently with GC. 
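// Editor's note (illustrative analogue, not part of the original file):
// injectglist's shape -- move a private batch into the shared queue under the
// scheduler lock, then wake up to one idle worker per queued item -- is a
// common pattern. A user-level sketch with hypothetical names (workers,
// inject, idle):
//
//	package workers
//
//	import "sync"
//
//	var (
//		mu    sync.Mutex
//		queue []func()              // shared run queue
//		idle  = make(chan struct{}) // unbuffered; a parked worker receives from it
//	)
//
//	// inject queues a batch and wakes at most len(batch) parked workers.
//	func inject(batch []func()) {
//		if len(batch) == 0 {
//			return
//		}
//		mu.Lock()
//		queue = append(queue, batch...)
//		mu.Unlock()
//		for n := len(batch); n > 0; n-- {
//			select {
//			case idle <- struct{}{}: // wake one parked worker
//			default:
//				return // nobody left to wake
//			}
//		}
//	}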
2427 func injectglist(glist *gList) { 2428 if glist.empty() { 2429 return 2430 } 2431 if trace.enabled { 2432 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() { 2433 traceGoUnpark(gp, 0) 2434 } 2435 } 2436 lock(&sched.lock) 2437 var n int 2438 for n = 0; !glist.empty(); n++ { 2439 gp := glist.pop() 2440 casgstatus(gp, _Gwaiting, _Grunnable) 2441 globrunqput(gp) 2442 } 2443 unlock(&sched.lock) 2444 for ; n != 0 && sched.npidle != 0; n-- { 2445 startm(nil, false) 2446 } 2447 *glist = gList{} 2448 } 2449 2450 // One round of scheduler: find a runnable goroutine and execute it. 2451 // Never returns. 2452 func schedule() { 2453 _g_ := getg() 2454 2455 if _g_.m.locks != 0 { 2456 throw("schedule: holding locks") 2457 } 2458 2459 if _g_.m.lockedg != 0 { 2460 stoplockedm() 2461 execute(_g_.m.lockedg.ptr(), false) // Never returns. 2462 } 2463 2464 // We should not schedule away from a g that is executing a cgo call, 2465 // since the cgo call is using the m's g0 stack. 2466 if _g_.m.incgo { 2467 throw("schedule: in cgo") 2468 } 2469 2470 top: 2471 pp := _g_.m.p.ptr() 2472 pp.preempt = false 2473 2474 if sched.gcwaiting != 0 { 2475 gcstopm() 2476 goto top 2477 } 2478 if pp.runSafePointFn != 0 { 2479 runSafePointFn() 2480 } 2481 2482 // Sanity check: if we are spinning, the run queue should be empty. 2483 // Check this before calling checkTimers, as that might call 2484 // goready to put a ready goroutine on the local run queue. 2485 if _g_.m.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { 2486 throw("schedule: spinning with local work") 2487 } 2488 2489 checkTimers(pp, 0) 2490 2491 var gp *g 2492 var inheritTime bool 2493 2494 // Normal goroutines will check for need to wakeP in ready, 2495 // but GCworkers and tracereaders will not, so the check must 2496 // be done here instead. 2497 tryWakeP := false 2498 if trace.enabled || trace.shutdown { 2499 gp = traceReader() 2500 if gp != nil { 2501 casgstatus(gp, _Gwaiting, _Grunnable) 2502 traceGoUnpark(gp, 0) 2503 tryWakeP = true 2504 } 2505 } 2506 if gp == nil && gcBlackenEnabled != 0 { 2507 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2508 tryWakeP = tryWakeP || gp != nil 2509 } 2510 if gp == nil { 2511 // Check the global runnable queue once in a while to ensure fairness. 2512 // Otherwise two goroutines can completely occupy the local runqueue 2513 // by constantly respawning each other. 2514 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2515 lock(&sched.lock) 2516 gp = globrunqget(_g_.m.p.ptr(), 1) 2517 unlock(&sched.lock) 2518 } 2519 } 2520 if gp == nil { 2521 gp, inheritTime = runqget(_g_.m.p.ptr()) 2522 // We can see gp != nil here even if the M is spinning, 2523 // if checkTimers added a local goroutine via goready. 2524 } 2525 if gp == nil { 2526 gp, inheritTime = findrunnable() // blocks until work is available 2527 } 2528 2529 // This thread is going to run a goroutine and is not spinning anymore, 2530 // so if it was marked as spinning we need to reset it now and potentially 2531 // start a new spinning M. 2532 if _g_.m.spinning { 2533 resetspinning() 2534 } 2535 2536 if sched.disable.user && !schedEnabled(gp) { 2537 // Scheduling of this goroutine is disabled. Put it on 2538 // the list of pending runnable goroutines for when we 2539 // re-enable user scheduling and look again. 2540 lock(&sched.lock) 2541 if schedEnabled(gp) { 2542 // Something re-enabled scheduling while we 2543 // were acquiring the lock. 
2544 unlock(&sched.lock) 2545 } else { 2546 sched.disable.runnable.pushBack(gp) 2547 sched.disable.n++ 2548 unlock(&sched.lock) 2549 goto top 2550 } 2551 } 2552 2553 // If about to schedule a not-normal goroutine (a GCworker or tracereader), 2554 // wake a P if there is one. 2555 if tryWakeP { 2556 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 2557 wakep() 2558 } 2559 } 2560 if gp.lockedm != 0 { 2561 // Hands off own p to the locked m, 2562 // then blocks waiting for a new p. 2563 startlockedm(gp) 2564 goto top 2565 } 2566 2567 execute(gp, inheritTime) 2568 } 2569 2570 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2571 // Typically a caller sets gp's status away from Grunning and then 2572 // immediately calls dropg to finish the job. The caller is also responsible 2573 // for arranging that gp will be restarted using ready at an 2574 // appropriate time. After calling dropg and arranging for gp to be 2575 // readied later, the caller can do other work but eventually should 2576 // call schedule to restart the scheduling of goroutines on this m. 2577 func dropg() { 2578 _g_ := getg() 2579 2580 setMNoWB(&_g_.m.curg.m, nil) 2581 setGNoWB(&_g_.m.curg, nil) 2582 } 2583 2584 // checkTimers runs any timers for the P that are ready. 2585 // If now is not 0 it is the current time. 2586 // It returns the current time or 0 if it is not known, 2587 // and the time when the next timer should run or 0 if there is no next timer, 2588 // and reports whether it ran any timers. 2589 // If the time when the next timer should run is not 0, 2590 // it is always larger than the returned time. 2591 // We pass now in and out to avoid extra calls of nanotime. 2592 //go:yeswritebarrierrec 2593 func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { 2594 // If there are no timers to adjust, and the first timer on 2595 // the heap is not yet ready to run, then there is nothing to do. 2596 if atomic.Load(&pp.adjustTimers) == 0 { 2597 next := int64(atomic.Load64(&pp.timer0When)) 2598 if next == 0 { 2599 return now, 0, false 2600 } 2601 if now == 0 { 2602 now = nanotime() 2603 } 2604 if now < next { 2605 // Next timer is not ready to run. 2606 // But keep going if we would clear deleted timers. 2607 // This corresponds to the condition below where 2608 // we decide whether to call clearDeletedTimers. 2609 if pp != getg().m.p.ptr() || int(atomic.Load(&pp.deletedTimers)) <= int(atomic.Load(&pp.numTimers)/4) { 2610 return now, next, false 2611 } 2612 } 2613 } 2614 2615 lock(&pp.timersLock) 2616 2617 adjusttimers(pp) 2618 2619 rnow = now 2620 if len(pp.timers) > 0 { 2621 if rnow == 0 { 2622 rnow = nanotime() 2623 } 2624 for len(pp.timers) > 0 { 2625 // Note that runtimer may temporarily unlock 2626 // pp.timersLock. 2627 if tw := runtimer(pp, rnow); tw != 0 { 2628 if tw > 0 { 2629 pollUntil = tw 2630 } 2631 break 2632 } 2633 ran = true 2634 } 2635 } 2636 2637 // If this is the local P, and there are a lot of deleted timers, 2638 // clear them out. We only do this for the local P to reduce 2639 // lock contention on timersLock. 2640 if pp == getg().m.p.ptr() && int(atomic.Load(&pp.deletedTimers)) > len(pp.timers)/4 { 2641 clearDeletedTimers(pp) 2642 } 2643 2644 unlock(&pp.timersLock) 2645 2646 return rnow, pollUntil, ran 2647 } 2648 2649 // shouldStealTimers reports whether we should try stealing the timers from p2. 
2650 // We don't steal timers from a running P that is not marked for preemption, 2651 // on the assumption that it will run its own timers. This reduces 2652 // contention on the timers lock. 2653 func shouldStealTimers(p2 *p) bool { 2654 if p2.status != _Prunning { 2655 return true 2656 } 2657 mp := p2.m.ptr() 2658 if mp == nil || mp.locks > 0 { 2659 return false 2660 } 2661 gp := mp.curg 2662 if gp == nil || gp.atomicstatus != _Grunning || !gp.preempt { 2663 return false 2664 } 2665 return true 2666 } 2667 2668 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2669 unlock((*mutex)(lock)) 2670 return true 2671 } 2672 2673 // park continuation on g0. 2674 func park_m(gp *g) { 2675 _g_ := getg() 2676 2677 if trace.enabled { 2678 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2679 } 2680 2681 casgstatus(gp, _Grunning, _Gwaiting) 2682 dropg() 2683 2684 if fn := _g_.m.waitunlockf; fn != nil { 2685 ok := fn(gp, _g_.m.waitlock) 2686 _g_.m.waitunlockf = nil 2687 _g_.m.waitlock = nil 2688 if !ok { 2689 if trace.enabled { 2690 traceGoUnpark(gp, 2) 2691 } 2692 casgstatus(gp, _Gwaiting, _Grunnable) 2693 execute(gp, true) // Schedule it back, never returns. 2694 } 2695 } 2696 schedule() 2697 } 2698 2699 func goschedImpl(gp *g) { 2700 status := readgstatus(gp) 2701 if status&^_Gscan != _Grunning { 2702 dumpgstatus(gp) 2703 throw("bad g status") 2704 } 2705 casgstatus(gp, _Grunning, _Grunnable) 2706 dropg() 2707 lock(&sched.lock) 2708 globrunqput(gp) 2709 unlock(&sched.lock) 2710 2711 schedule() 2712 } 2713 2714 // Gosched continuation on g0. 2715 func gosched_m(gp *g) { 2716 if trace.enabled { 2717 traceGoSched() 2718 } 2719 goschedImpl(gp) 2720 } 2721 2722 // goschedguarded is a forbidden-states-avoided version of gosched_m 2723 func goschedguarded_m(gp *g) { 2724 2725 if !canPreemptM(gp.m) { 2726 gogo(&gp.sched) // never return 2727 } 2728 2729 if trace.enabled { 2730 traceGoSched() 2731 } 2732 goschedImpl(gp) 2733 } 2734 2735 func gopreempt_m(gp *g) { 2736 if trace.enabled { 2737 traceGoPreempt() 2738 } 2739 goschedImpl(gp) 2740 } 2741 2742 // preemptPark parks gp and puts it in _Gpreempted. 2743 // 2744 //go:systemstack 2745 func preemptPark(gp *g) { 2746 if trace.enabled { 2747 traceGoPark(traceEvGoBlock, 0) 2748 } 2749 status := readgstatus(gp) 2750 if status&^_Gscan != _Grunning { 2751 dumpgstatus(gp) 2752 throw("bad g status") 2753 } 2754 gp.waitreason = waitReasonPreempted 2755 // Transition from _Grunning to _Gscan|_Gpreempted. We can't 2756 // be in _Grunning when we dropg because then we'd be running 2757 // without an M, but the moment we're in _Gpreempted, 2758 // something could claim this G before we've fully cleaned it 2759 // up. Hence, we set the scan bit to lock down further 2760 // transitions until we can dropg. 2761 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted) 2762 dropg() 2763 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted) 2764 schedule() 2765 } 2766 2767 // goyield is like Gosched, but it: 2768 // - emits a GoPreempt trace event instead of a GoSched trace event 2769 // - puts the current G on the runq of the current P instead of the globrunq 2770 func goyield() { 2771 checkTimeouts() 2772 mcall(goyield_m) 2773 } 2774 2775 func goyield_m(gp *g) { 2776 if trace.enabled { 2777 traceGoPreempt() 2778 } 2779 pp := gp.m.p.ptr() 2780 casgstatus(gp, _Grunning, _Grunnable) 2781 dropg() 2782 runqput(pp, gp, false) 2783 schedule() 2784 } 2785 2786 // Finishes execution of the current goroutine. 
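// Editor's note (illustrative, not part of the original file): this path is
// reached when a goroutine's function returns or when it calls runtime.Goexit,
// which runs deferred calls and then terminates the goroutine:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		done := make(chan struct{})
//		go func() {
//			defer close(done) // deferred calls still run
//			runtime.Goexit()  // terminates this goroutine here
//			fmt.Println("never reached")
//		}()
//		<-done
//	}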
2787 func goexit1() { 2788 if raceenabled { 2789 racegoend() 2790 } 2791 if trace.enabled { 2792 traceGoEnd() 2793 } 2794 mcall(goexit0) 2795 } 2796 2797 // goexit continuation on g0. 2798 func goexit0(gp *g) { 2799 _g_ := getg() 2800 2801 casgstatus(gp, _Grunning, _Gdead) 2802 if isSystemGoroutine(gp, false) { 2803 atomic.Xadd(&sched.ngsys, -1) 2804 } 2805 gp.m = nil 2806 locked := gp.lockedm != 0 2807 gp.lockedm = 0 2808 _g_.m.lockedg = 0 2809 gp.preemptStop = false 2810 gp.paniconfault = false 2811 gp._defer = nil // should be true already but just in case. 2812 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2813 gp.writebuf = nil 2814 gp.waitreason = 0 2815 gp.param = nil 2816 gp.labels = nil 2817 gp.timer = nil 2818 2819 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { 2820 // Flush assist credit to the global pool. This gives 2821 // better information to pacing if the application is 2822 // rapidly creating and exiting goroutines. 2823 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes)) 2824 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit) 2825 gp.gcAssistBytes = 0 2826 } 2827 2828 dropg() 2829 2830 if GOARCH == "wasm" { // no threads yet on wasm 2831 gfput(_g_.m.p.ptr(), gp) 2832 schedule() // never returns 2833 } 2834 2835 if _g_.m.lockedInt != 0 { 2836 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") 2837 throw("internal lockOSThread error") 2838 } 2839 gfput(_g_.m.p.ptr(), gp) 2840 if locked { 2841 // The goroutine may have locked this thread because 2842 // it put it in an unusual kernel state. Kill it 2843 // rather than returning it to the thread pool. 2844 2845 // Return to mstart, which will release the P and exit 2846 // the thread. 2847 if GOOS != "plan9" { // See golang.org/issue/22227. 2848 gogo(&_g_.m.g0.sched) 2849 } else { 2850 // Clear lockedExt on plan9 since we may end up re-using 2851 // this thread. 2852 _g_.m.lockedExt = 0 2853 } 2854 } 2855 schedule() 2856 } 2857 2858 // save updates getg().sched to refer to pc and sp so that a following 2859 // gogo will restore pc and sp. 2860 // 2861 // save must not have write barriers because invoking a write barrier 2862 // can clobber getg().sched. 2863 // 2864 //go:nosplit 2865 //go:nowritebarrierrec 2866 func save(pc, sp uintptr) { 2867 _g_ := getg() 2868 2869 _g_.sched.pc = pc 2870 _g_.sched.sp = sp 2871 _g_.sched.lr = 0 2872 _g_.sched.ret = 0 2873 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2874 // We need to ensure ctxt is zero, but can't have a write 2875 // barrier here. However, it should always already be zero. 2876 // Assert that. 2877 if _g_.sched.ctxt != nil { 2878 badctxt() 2879 } 2880 } 2881 2882 // The goroutine g is about to enter a system call. 2883 // Record that it's not using the cpu anymore. 2884 // This is called only from the go syscall library and cgocall, 2885 // not from the low-level system calls used by the runtime. 2886 // 2887 // Entersyscall cannot split the stack: the gosave must 2888 // make g->sched refer to the caller's stack segment, because 2889 // entersyscall is going to return immediately after. 2890 // 2891 // Nothing entersyscall calls can split the stack either. 2892 // We cannot safely move the stack during an active call to syscall, 2893 // because we do not know which of the uintptr arguments are 2894 // really pointers (back into the stack).
2895 // In practice, this means that we make the fast path run through 2896 // entersyscall doing no-split things, and the slow path has to use systemstack 2897 // to run bigger things on the system stack. 2898 // 2899 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2900 // saved SP and PC are restored. This is needed when exitsyscall will be called 2901 // from a function further up in the call stack than the parent, as g->syscallsp 2902 // must always point to a valid stack frame. entersyscall below is the normal 2903 // entry point for syscalls, which obtains the SP and PC from the caller. 2904 // 2905 // Syscall tracing: 2906 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2907 // If the syscall does not block, that is it, we do not emit any other events. 2908 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2909 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2910 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2911 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2912 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2913 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2914 // and we wait for the increment before emitting traceGoSysExit. 2915 // Note that the increment is done even if tracing is not enabled, 2916 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2917 // 2918 //go:nosplit 2919 func reentersyscall(pc, sp uintptr) { 2920 _g_ := getg() 2921 2922 // Disable preemption because during this function g is in Gsyscall status, 2923 // but can have inconsistent g->sched, do not let GC observe it. 2924 _g_.m.locks++ 2925 2926 // Entersyscall must not call any function that might split/grow the stack. 2927 // (See details in comment above.) 2928 // Catch calls that might, by replacing the stack guard with something that 2929 // will trip any stack check and leaving a flag to tell newstack to die. 2930 _g_.stackguard0 = stackPreempt 2931 _g_.throwsplit = true 2932 2933 // Leave SP around for GC and traceback. 
2934 save(pc, sp) 2935 _g_.syscallsp = sp 2936 _g_.syscallpc = pc 2937 casgstatus(_g_, _Grunning, _Gsyscall) 2938 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2939 systemstack(func() { 2940 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2941 throw("entersyscall") 2942 }) 2943 } 2944 2945 if trace.enabled { 2946 systemstack(traceGoSysCall) 2947 // systemstack itself clobbers g.sched.{pc,sp} and we might 2948 // need them later when the G is genuinely blocked in a 2949 // syscall 2950 save(pc, sp) 2951 } 2952 2953 if atomic.Load(&sched.sysmonwait) != 0 { 2954 systemstack(entersyscall_sysmon) 2955 save(pc, sp) 2956 } 2957 2958 if _g_.m.p.ptr().runSafePointFn != 0 { 2959 // runSafePointFn may stack split if run on this stack 2960 systemstack(runSafePointFn) 2961 save(pc, sp) 2962 } 2963 2964 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2965 _g_.sysblocktraced = true 2966 _g_.m.mcache = nil 2967 pp := _g_.m.p.ptr() 2968 pp.m = 0 2969 _g_.m.oldp.set(pp) 2970 _g_.m.p = 0 2971 atomic.Store(&pp.status, _Psyscall) 2972 if sched.gcwaiting != 0 { 2973 systemstack(entersyscall_gcwait) 2974 save(pc, sp) 2975 } 2976 2977 _g_.m.locks-- 2978 } 2979 2980 // Standard syscall entry used by the go syscall library and normal cgo calls. 2981 // 2982 // This is exported via linkname to assembly in the syscall package. 2983 // 2984 //go:nosplit 2985 //go:linkname entersyscall 2986 func entersyscall() { 2987 reentersyscall(getcallerpc(), getcallersp()) 2988 } 2989 2990 func entersyscall_sysmon() { 2991 lock(&sched.lock) 2992 if atomic.Load(&sched.sysmonwait) != 0 { 2993 atomic.Store(&sched.sysmonwait, 0) 2994 notewakeup(&sched.sysmonnote) 2995 } 2996 unlock(&sched.lock) 2997 } 2998 2999 func entersyscall_gcwait() { 3000 _g_ := getg() 3001 _p_ := _g_.m.oldp.ptr() 3002 3003 lock(&sched.lock) 3004 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 3005 if trace.enabled { 3006 traceGoSysBlock(_p_) 3007 traceProcStop(_p_) 3008 } 3009 _p_.syscalltick++ 3010 if sched.stopwait--; sched.stopwait == 0 { 3011 notewakeup(&sched.stopnote) 3012 } 3013 } 3014 unlock(&sched.lock) 3015 } 3016 3017 // The same as entersyscall(), but with a hint that the syscall is blocking. 3018 //go:nosplit 3019 func entersyscallblock() { 3020 _g_ := getg() 3021 3022 _g_.m.locks++ // see comment in entersyscall 3023 _g_.throwsplit = true 3024 _g_.stackguard0 = stackPreempt // see comment in entersyscall 3025 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 3026 _g_.sysblocktraced = true 3027 _g_.m.p.ptr().syscalltick++ 3028 3029 // Leave SP around for GC and traceback. 
3030 pc := getcallerpc() 3031 sp := getcallersp() 3032 save(pc, sp) 3033 _g_.syscallsp = _g_.sched.sp 3034 _g_.syscallpc = _g_.sched.pc 3035 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3036 sp1 := sp 3037 sp2 := _g_.sched.sp 3038 sp3 := _g_.syscallsp 3039 systemstack(func() { 3040 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3041 throw("entersyscallblock") 3042 }) 3043 } 3044 casgstatus(_g_, _Grunning, _Gsyscall) 3045 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 3046 systemstack(func() { 3047 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 3048 throw("entersyscallblock") 3049 }) 3050 } 3051 3052 systemstack(entersyscallblock_handoff) 3053 3054 // Resave for traceback during blocked call. 3055 save(getcallerpc(), getcallersp()) 3056 3057 _g_.m.locks-- 3058 } 3059 3060 func entersyscallblock_handoff() { 3061 if trace.enabled { 3062 traceGoSysCall() 3063 traceGoSysBlock(getg().m.p.ptr()) 3064 } 3065 handoffp(releasep()) 3066 } 3067 3068 // The goroutine g exited its system call. 3069 // Arrange for it to run on a cpu again. 3070 // This is called only from the go syscall library, not 3071 // from the low-level system calls used by the runtime. 3072 // 3073 // Write barriers are not allowed because our P may have been stolen. 3074 // 3075 // This is exported via linkname to assembly in the syscall package. 3076 // 3077 //go:nosplit 3078 //go:nowritebarrierrec 3079 //go:linkname exitsyscall 3080 func exitsyscall() { 3081 _g_ := getg() 3082 3083 _g_.m.locks++ // see comment in entersyscall 3084 if getcallersp() > _g_.syscallsp { 3085 throw("exitsyscall: syscall frame is no longer valid") 3086 } 3087 3088 _g_.waitsince = 0 3089 oldp := _g_.m.oldp.ptr() 3090 _g_.m.oldp = 0 3091 if exitsyscallfast(oldp) { 3092 if _g_.m.mcache == nil { 3093 throw("lost mcache") 3094 } 3095 if trace.enabled { 3096 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3097 systemstack(traceGoStart) 3098 } 3099 } 3100 // There's a cpu for us, so we can run. 3101 _g_.m.p.ptr().syscalltick++ 3102 // We need to cas the status and scan before resuming... 3103 casgstatus(_g_, _Gsyscall, _Grunning) 3104 3105 // Garbage collector isn't running (since we are), 3106 // so okay to clear syscallsp. 3107 _g_.syscallsp = 0 3108 _g_.m.locks-- 3109 if _g_.preempt { 3110 // restore the preemption request in case we've cleared it in newstack 3111 _g_.stackguard0 = stackPreempt 3112 } else { 3113 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 3114 _g_.stackguard0 = _g_.stack.lo + _StackGuard 3115 } 3116 _g_.throwsplit = false 3117 3118 if sched.disable.user && !schedEnabled(_g_) { 3119 // Scheduling of this goroutine is disabled. 3120 Gosched() 3121 } 3122 3123 return 3124 } 3125 3126 _g_.sysexitticks = 0 3127 if trace.enabled { 3128 // Wait till traceGoSysBlock event is emitted. 3129 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3130 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 3131 osyield() 3132 } 3133 // We can't trace syscall exit right now because we don't have a P. 3134 // Tracing code can invoke write barriers that cannot run without a P. 3135 // So instead we remember the syscall exit time and emit the event 3136 // in execute when we have a P. 
3137 _g_.sysexitticks = cputicks() 3138 } 3139 3140 _g_.m.locks-- 3141 3142 // Call the scheduler. 3143 mcall(exitsyscall0) 3144 3145 if _g_.m.mcache == nil { 3146 throw("lost mcache") 3147 } 3148 3149 // Scheduler returned, so we're allowed to run now. 3150 // Delete the syscallsp information that we left for 3151 // the garbage collector during the system call. 3152 // Must wait until now because until gosched returns 3153 // we don't know for sure that the garbage collector 3154 // is not running. 3155 _g_.syscallsp = 0 3156 _g_.m.p.ptr().syscalltick++ 3157 _g_.throwsplit = false 3158 } 3159 3160 //go:nosplit 3161 func exitsyscallfast(oldp *p) bool { 3162 _g_ := getg() 3163 3164 // Freezetheworld sets stopwait but does not retake P's. 3165 if sched.stopwait == freezeStopWait { 3166 return false 3167 } 3168 3169 // Try to re-acquire the last P. 3170 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { 3171 // There's a cpu for us, so we can run. 3172 wirep(oldp) 3173 exitsyscallfast_reacquired() 3174 return true 3175 } 3176 3177 // Try to get any other idle P. 3178 if sched.pidle != 0 { 3179 var ok bool 3180 systemstack(func() { 3181 ok = exitsyscallfast_pidle() 3182 if ok && trace.enabled { 3183 if oldp != nil { 3184 // Wait till traceGoSysBlock event is emitted. 3185 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3186 for oldp.syscalltick == _g_.m.syscalltick { 3187 osyield() 3188 } 3189 } 3190 traceGoSysExit(0) 3191 } 3192 }) 3193 if ok { 3194 return true 3195 } 3196 } 3197 return false 3198 } 3199 3200 // exitsyscallfast_reacquired is the exitsyscall path on which this G 3201 // has successfully reacquired the P it was running on before the 3202 // syscall. 3203 // 3204 //go:nosplit 3205 func exitsyscallfast_reacquired() { 3206 _g_ := getg() 3207 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3208 if trace.enabled { 3209 // The p was retaken and then entered a syscall again (since _g_.m.syscalltick has changed). 3210 // traceGoSysBlock for this syscall was already emitted, 3211 // but here we effectively retake the p from the new syscall running on the same p. 3212 systemstack(func() { 3213 // Denote blocking of the new syscall. 3214 traceGoSysBlock(_g_.m.p.ptr()) 3215 // Denote completion of the current syscall. 3216 traceGoSysExit(0) 3217 }) 3218 } 3219 _g_.m.p.ptr().syscalltick++ 3220 } 3221 } 3222 3223 func exitsyscallfast_pidle() bool { 3224 lock(&sched.lock) 3225 _p_ := pidleget() 3226 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 3227 atomic.Store(&sched.sysmonwait, 0) 3228 notewakeup(&sched.sysmonnote) 3229 } 3230 unlock(&sched.lock) 3231 if _p_ != nil { 3232 acquirep(_p_) 3233 return true 3234 } 3235 return false 3236 } 3237 3238 // exitsyscall slow path on g0. 3239 // Failed to acquire P, enqueue gp as runnable. 3240 // 3241 //go:nowritebarrierrec 3242 func exitsyscall0(gp *g) { 3243 _g_ := getg() 3244 3245 casgstatus(gp, _Gsyscall, _Grunnable) 3246 dropg() 3247 lock(&sched.lock) 3248 var _p_ *p 3249 if schedEnabled(_g_) { 3250 _p_ = pidleget() 3251 } 3252 if _p_ == nil { 3253 globrunqput(gp) 3254 } else if atomic.Load(&sched.sysmonwait) != 0 { 3255 atomic.Store(&sched.sysmonwait, 0) 3256 notewakeup(&sched.sysmonnote) 3257 } 3258 unlock(&sched.lock) 3259 if _p_ != nil { 3260 acquirep(_p_) 3261 execute(gp, false) // Never returns. 3262 } 3263 if _g_.m.lockedg != 0 { 3264 // Wait until another thread schedules gp and so m again.
3265 stoplockedm() 3266 execute(gp, false) // Never returns. 3267 } 3268 stopm() 3269 schedule() // Never returns. 3270 } 3271 3272 func beforefork() { 3273 gp := getg().m.curg 3274 3275 // Block signals during a fork, so that the child does not run 3276 // a signal handler before exec if a signal is sent to the process 3277 // group. See issue #18600. 3278 gp.m.locks++ 3279 msigsave(gp.m) 3280 sigblock() 3281 3282 // This function is called before fork in syscall package. 3283 // Code between fork and exec must not allocate memory nor even try to grow stack. 3284 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 3285 // runtime_AfterFork will undo this in parent process, but not in child. 3286 gp.stackguard0 = stackFork 3287 } 3288 3289 // Called from syscall package before fork. 3290 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 3291 //go:nosplit 3292 func syscall_runtime_BeforeFork() { 3293 systemstack(beforefork) 3294 } 3295 3296 func afterfork() { 3297 gp := getg().m.curg 3298 3299 // See the comments in beforefork. 3300 gp.stackguard0 = gp.stack.lo + _StackGuard 3301 3302 msigrestore(gp.m.sigmask) 3303 3304 gp.m.locks-- 3305 } 3306 3307 // Called from syscall package after fork in parent. 3308 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 3309 //go:nosplit 3310 func syscall_runtime_AfterFork() { 3311 systemstack(afterfork) 3312 } 3313 3314 // inForkedChild is true while manipulating signals in the child process. 3315 // This is used to avoid calling libc functions in case we are using vfork. 3316 var inForkedChild bool 3317 3318 // Called from syscall package after fork in child. 3319 // It resets non-sigignored signals to the default handler, and 3320 // restores the signal mask in preparation for the exec. 3321 // 3322 // Because this might be called during a vfork, and therefore may be 3323 // temporarily sharing address space with the parent process, this must 3324 // not change any global variables or call into C code that may do so. 3325 // 3326 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 3327 //go:nosplit 3328 //go:nowritebarrierrec 3329 func syscall_runtime_AfterForkInChild() { 3330 // It's OK to change the global variable inForkedChild here 3331 // because we are going to change it back. There is no race here, 3332 // because if we are sharing address space with the parent process, 3333 // then the parent process can not be running concurrently. 3334 inForkedChild = true 3335 3336 clearSignalHandlers() 3337 3338 // When we are the child we are the only thread running, 3339 // so we know that nothing else has changed gp.m.sigmask. 3340 msigrestore(getg().m.sigmask) 3341 3342 inForkedChild = false 3343 } 3344 3345 // Called from syscall package before Exec. 3346 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 3347 func syscall_runtime_BeforeExec() { 3348 // Prevent thread creation during exec. 3349 execLock.lock() 3350 } 3351 3352 // Called from syscall package after Exec. 3353 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 3354 func syscall_runtime_AfterExec() { 3355 execLock.unlock() 3356 } 3357 3358 // Allocate a new g, with a stack big enough for stacksize bytes.
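// Editor's note (illustrative, not part of the original file): malg below is
// how every goroutine stack starts out; newproc1 calls malg(_StackMin), a
// small stack that later grows by allocating a larger one and copying frames,
// up to the runtime's maximum stack size. Deep recursion therefore just works:
//
//	package main
//
//	import "fmt"
//
//	// deep recurses far past the initial goroutine stack; the runtime
//	// grows the stack transparently as the recursion deepens.
//	func deep(n int) int {
//		if n == 0 {
//			return 0
//		}
//		var pad [128]byte // make each frame noticeably large
//		_ = pad
//		return deep(n-1) + 1
//	}
//
//	func main() {
//		fmt.Println(deep(100000))
//	}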
3359 func malg(stacksize int32) *g { 3360 newg := new(g) 3361 if stacksize >= 0 { 3362 stacksize = round2(_StackSystem + stacksize) 3363 systemstack(func() { 3364 newg.stack = stackalloc(uint32(stacksize)) 3365 }) 3366 newg.stackguard0 = newg.stack.lo + _StackGuard 3367 newg.stackguard1 = ^uintptr(0) 3368 // Clear the bottom word of the stack. We record g 3369 // there on gsignal stack during VDSO on ARM and ARM64. 3370 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0 3371 } 3372 return newg 3373 } 3374 3375 // Create a new g running fn with siz bytes of arguments. 3376 // Put it on the queue of g's waiting to run. 3377 // The compiler turns a go statement into a call to this. 3378 // Cannot split the stack because it assumes that the arguments 3379 // are available sequentially after &fn; they would not be 3380 // copied if a stack split occurred. 3381 //go:nosplit 3382 func newproc(siz int32, fn *funcval) { 3383 argp := add(unsafe.Pointer(&fn), sys.PtrSize) 3384 gp := getg() 3385 pc := getcallerpc() 3386 systemstack(func() { 3387 newproc1(fn, argp, siz, gp, pc) 3388 }) 3389 } 3390 3391 // Create a new g running fn with narg bytes of arguments starting 3392 // at argp. callerpc is the address of the go statement that created 3393 // this. The new g is put on the queue of g's waiting to run. 3394 func newproc1(fn *funcval, argp unsafe.Pointer, narg int32, callergp *g, callerpc uintptr) { 3395 _g_ := getg() 3396 3397 if fn == nil { 3398 _g_.m.throwing = -1 // do not dump full stacks 3399 throw("go of nil func value") 3400 } 3401 acquirem() // disable preemption because it can be holding p in a local var 3402 siz := narg 3403 siz = (siz + 7) &^ 7 3404 3405 // We could allocate a larger initial stack if necessary. 3406 // Not worth it: this is almost always an error. 3407 // 4*sizeof(uintreg): extra space added below 3408 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 3409 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 3410 throw("newproc: function arguments too large for new goroutine") 3411 } 3412 3413 _p_ := _g_.m.p.ptr() 3414 newg := gfget(_p_) 3415 if newg == nil { 3416 newg = malg(_StackMin) 3417 casgstatus(newg, _Gidle, _Gdead) 3418 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 3419 } 3420 if newg.stack.hi == 0 { 3421 throw("newproc1: newg missing stack") 3422 } 3423 3424 if readgstatus(newg) != _Gdead { 3425 throw("newproc1: new g is not Gdead") 3426 } 3427 3428 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 3429 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 3430 sp := newg.stack.hi - totalSize 3431 spArg := sp 3432 if usesLR { 3433 // caller's LR 3434 *(*uintptr)(unsafe.Pointer(sp)) = 0 3435 prepGoExitFrame(sp) 3436 spArg += sys.MinFrameSize 3437 } 3438 if narg > 0 { 3439 memmove(unsafe.Pointer(spArg), argp, uintptr(narg)) 3440 // This is a stack-to-stack copy. If write barriers 3441 // are enabled and the source stack is grey (the 3442 // destination is always black), then perform a 3443 // barrier copy. We do this *after* the memmove 3444 // because the destination stack may have garbage on 3445 // it. 3446 if writeBarrier.needed && !_g_.m.curg.gcscandone { 3447 f := findfunc(fn.fn) 3448 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 3449 if stkmap.nbit > 0 { 3450 // We're in the prologue, so it's always stack map index 0. 
3451 bv := stackmapdata(stkmap, 0) 3452 bulkBarrierBitmap(spArg, spArg, uintptr(bv.n)*sys.PtrSize, 0, bv.bytedata) 3453 } 3454 } 3455 } 3456 3457 ////// Add by q.bryant@live.com for logid @2020.09.10 ///////begin////// 3458 newg.logid = callergp.logid 3459 ////// Add by q.bryant@live.com for logid @2020.09.10 ///////end///////// 3460 3461 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 3462 newg.sched.sp = sp 3463 newg.stktopsp = sp 3464 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 3465 newg.sched.g = guintptr(unsafe.Pointer(newg)) 3466 gostartcallfn(&newg.sched, fn) 3467 newg.gopc = callerpc 3468 newg.ancestors = saveAncestors(callergp) 3469 newg.startpc = fn.fn 3470 if _g_.m.curg != nil { 3471 newg.labels = _g_.m.curg.labels 3472 } 3473 if isSystemGoroutine(newg, false) { 3474 atomic.Xadd(&sched.ngsys, +1) 3475 } 3476 casgstatus(newg, _Gdead, _Grunnable) 3477 3478 if _p_.goidcache == _p_.goidcacheend { 3479 // Sched.goidgen is the last allocated id, 3480 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 3481 // At startup sched.goidgen=0, so main goroutine receives goid=1. 3482 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 3483 _p_.goidcache -= _GoidCacheBatch - 1 3484 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 3485 } 3486 newg.goid = int64(_p_.goidcache) 3487 _p_.goidcache++ 3488 if raceenabled { 3489 newg.racectx = racegostart(callerpc) 3490 } 3491 if trace.enabled { 3492 traceGoCreate(newg, newg.startpc) 3493 } 3494 runqput(_p_, newg, true) 3495 3496 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { 3497 wakep() 3498 } 3499 releasem(_g_.m) 3500 } 3501 3502 // saveAncestors copies previous ancestors of the given caller g and 3503 // includes info for the current caller into a new set of tracebacks for 3504 // a g being created. 3505 func saveAncestors(callergp *g) *[]ancestorInfo { 3506 // Copy all prior info, except for the root goroutine (goid 0). 3507 if debug.tracebackancestors <= 0 || callergp.goid == 0 { 3508 return nil 3509 } 3510 var callerAncestors []ancestorInfo 3511 if callergp.ancestors != nil { 3512 callerAncestors = *callergp.ancestors 3513 } 3514 n := int32(len(callerAncestors)) + 1 3515 if n > debug.tracebackancestors { 3516 n = debug.tracebackancestors 3517 } 3518 ancestors := make([]ancestorInfo, n) 3519 copy(ancestors[1:], callerAncestors) 3520 3521 var pcs [_TracebackMaxFrames]uintptr 3522 npcs := gcallers(callergp, 0, pcs[:]) 3523 ipcs := make([]uintptr, npcs) 3524 copy(ipcs, pcs[:]) 3525 ancestors[0] = ancestorInfo{ 3526 pcs: ipcs, 3527 goid: callergp.goid, 3528 gopc: callergp.gopc, 3529 } 3530 3531 ancestorsp := new([]ancestorInfo) 3532 *ancestorsp = ancestors 3533 return ancestorsp 3534 } 3535 3536 // Put on gfree list. 3537 // If local list is too long, transfer a batch to the global list. 3538 func gfput(_p_ *p, gp *g) { 3539 if readgstatus(gp) != _Gdead { 3540 throw("gfput: bad status (not Gdead)") 3541 } 3542 3543 stksize := gp.stack.hi - gp.stack.lo 3544 3545 if stksize != _FixedStack { 3546 // non-standard stack size - free it.
3547 stackfree(gp.stack) 3548 gp.stack.lo = 0 3549 gp.stack.hi = 0 3550 gp.stackguard0 = 0 3551 } 3552 3553 _p_.gFree.push(gp) 3554 _p_.gFree.n++ 3555 if _p_.gFree.n >= 64 { 3556 lock(&sched.gFree.lock) 3557 for _p_.gFree.n >= 32 { 3558 _p_.gFree.n-- 3559 gp = _p_.gFree.pop() 3560 if gp.stack.lo == 0 { 3561 sched.gFree.noStack.push(gp) 3562 } else { 3563 sched.gFree.stack.push(gp) 3564 } 3565 sched.gFree.n++ 3566 } 3567 unlock(&sched.gFree.lock) 3568 } 3569 } 3570 3571 // Get from gfree list. 3572 // If local list is empty, grab a batch from global list. 3573 func gfget(_p_ *p) *g { 3574 retry: 3575 if _p_.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { 3576 lock(&sched.gFree.lock) 3577 // Move a batch of free Gs to the P. 3578 for _p_.gFree.n < 32 { 3579 // Prefer Gs with stacks. 3580 gp := sched.gFree.stack.pop() 3581 if gp == nil { 3582 gp = sched.gFree.noStack.pop() 3583 if gp == nil { 3584 break 3585 } 3586 } 3587 sched.gFree.n-- 3588 _p_.gFree.push(gp) 3589 _p_.gFree.n++ 3590 } 3591 unlock(&sched.gFree.lock) 3592 goto retry 3593 } 3594 gp := _p_.gFree.pop() 3595 if gp == nil { 3596 return nil 3597 } 3598 _p_.gFree.n-- 3599 if gp.stack.lo == 0 { 3600 // Stack was deallocated in gfput. Allocate a new one. 3601 systemstack(func() { 3602 gp.stack = stackalloc(_FixedStack) 3603 }) 3604 gp.stackguard0 = gp.stack.lo + _StackGuard 3605 } else { 3606 if raceenabled { 3607 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3608 } 3609 if msanenabled { 3610 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3611 } 3612 } 3613 return gp 3614 } 3615 3616 // Purge all cached G's from gfree list to the global list. 3617 func gfpurge(_p_ *p) { 3618 lock(&sched.gFree.lock) 3619 for !_p_.gFree.empty() { 3620 gp := _p_.gFree.pop() 3621 _p_.gFree.n-- 3622 if gp.stack.lo == 0 { 3623 sched.gFree.noStack.push(gp) 3624 } else { 3625 sched.gFree.stack.push(gp) 3626 } 3627 sched.gFree.n++ 3628 } 3629 unlock(&sched.gFree.lock) 3630 } 3631 3632 // Breakpoint executes a breakpoint trap. 3633 func Breakpoint() { 3634 breakpoint() 3635 } 3636 3637 // dolockOSThread is called by LockOSThread and lockOSThread below 3638 // after they modify m.locked. Do not allow preemption during this call, 3639 // or else the m might be different in this function than in the caller. 3640 //go:nosplit 3641 func dolockOSThread() { 3642 if GOARCH == "wasm" { 3643 return // no threads on wasm yet 3644 } 3645 _g_ := getg() 3646 _g_.m.lockedg.set(_g_) 3647 _g_.lockedm.set(_g_.m) 3648 } 3649 3650 //go:nosplit 3651 3652 // LockOSThread wires the calling goroutine to its current operating system thread. 3653 // The calling goroutine will always execute in that thread, 3654 // and no other goroutine will execute in it, 3655 // until the calling goroutine has made as many calls to 3656 // UnlockOSThread as to LockOSThread. 3657 // If the calling goroutine exits without unlocking the thread, 3658 // the thread will be terminated. 3659 // 3660 // All init functions are run on the startup thread. Calling LockOSThread 3661 // from an init function will cause the main function to be invoked on 3662 // that thread. 3663 // 3664 // A goroutine should call LockOSThread before calling OS services or 3665 // non-Go library functions that depend on per-thread state. 3666 func LockOSThread() { 3667 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { 3668 // If we need to start a new thread from the locked 3669 // thread, we need the template thread. 
Start it now 3670 // while we're in a known-good state. 3671 startTemplateThread() 3672 } 3673 _g_ := getg() 3674 _g_.m.lockedExt++ 3675 if _g_.m.lockedExt == 0 { 3676 _g_.m.lockedExt-- 3677 panic("LockOSThread nesting overflow") 3678 } 3679 dolockOSThread() 3680 } 3681 3682 //go:nosplit 3683 func lockOSThread() { 3684 getg().m.lockedInt++ 3685 dolockOSThread() 3686 } 3687 3688 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 3689 // after they update m->locked. Do not allow preemption during this call, 3690 // or else the m might be different in this function than in the caller. 3691 //go:nosplit 3692 func dounlockOSThread() { 3693 if GOARCH == "wasm" { 3694 return // no threads on wasm yet 3695 } 3696 _g_ := getg() 3697 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { 3698 return 3699 } 3700 _g_.m.lockedg = 0 3701 _g_.lockedm = 0 3702 } 3703 3704 //go:nosplit 3705 3706 // UnlockOSThread undoes an earlier call to LockOSThread. 3707 // If this drops the number of active LockOSThread calls on the 3708 // calling goroutine to zero, it unwires the calling goroutine from 3709 // its fixed operating system thread. 3710 // If there are no active LockOSThread calls, this is a no-op. 3711 // 3712 // Before calling UnlockOSThread, the caller must ensure that the OS 3713 // thread is suitable for running other goroutines. If the caller made 3714 // any permanent changes to the state of the thread that would affect 3715 // other goroutines, it should not call this function and thus leave 3716 // the goroutine locked to the OS thread until the goroutine (and 3717 // hence the thread) exits. 3718 func UnlockOSThread() { 3719 _g_ := getg() 3720 if _g_.m.lockedExt == 0 { 3721 return 3722 } 3723 _g_.m.lockedExt-- 3724 dounlockOSThread() 3725 } 3726 3727 //go:nosplit 3728 func unlockOSThread() { 3729 _g_ := getg() 3730 if _g_.m.lockedInt == 0 { 3731 systemstack(badunlockosthread) 3732 } 3733 _g_.m.lockedInt-- 3734 dounlockOSThread() 3735 } 3736 3737 func badunlockosthread() { 3738 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 3739 } 3740 3741 func gcount() int32 { 3742 n := int32(allglen) - sched.gFree.n - int32(atomic.Load(&sched.ngsys)) 3743 for _, _p_ := range allp { 3744 n -= _p_.gFree.n 3745 } 3746 3747 // All these variables can be changed concurrently, so the result can be inconsistent. 3748 // But at least the current goroutine is running. 3749 if n < 1 { 3750 n = 1 3751 } 3752 return n 3753 } 3754 3755 func mcount() int32 { 3756 return int32(sched.mnext - sched.nmfreed) 3757 } 3758 3759 var prof struct { 3760 signalLock uint32 3761 hz int32 3762 } 3763 3764 func _System() { _System() } 3765 func _ExternalCode() { _ExternalCode() } 3766 func _LostExternalCode() { _LostExternalCode() } 3767 func _GC() { _GC() } 3768 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 3769 func _VDSO() { _VDSO() } 3770 3771 // Called if we receive a SIGPROF signal. 3772 // Called by the signal handler, may run during STW. 3773 //go:nowritebarrierrec 3774 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3775 if prof.hz == 0 { 3776 return 3777 } 3778 3779 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 3780 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 3781 // the critical section, it creates a deadlock (when writing the sample).
3782 // As a workaround, create a counter of SIGPROFs while in critical section 3783 // to store the count, and pass it to sigprof.add() later when SIGPROF is 3784 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 3785 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" { 3786 if f := findfunc(pc); f.valid() { 3787 if hasPrefix(funcname(f), "runtime/internal/atomic") { 3788 cpuprof.lostAtomic++ 3789 return 3790 } 3791 } 3792 } 3793 3794 // Profiling runs concurrently with GC, so it must not allocate. 3795 // Set a trap in case the code does allocate. 3796 // Note that on windows, one thread takes profiles of all the 3797 // other threads, so mp is usually not getg().m. 3798 // In fact mp may not even be stopped. 3799 // See golang.org/issue/17165. 3800 getg().m.mallocing++ 3801 3802 // Define that a "user g" is a user-created goroutine, and a "system g" 3803 // is one that is m->g0 or m->gsignal. 3804 // 3805 // We might be interrupted for profiling halfway through a 3806 // goroutine switch. The switch involves updating three (or four) values: 3807 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3808 // because once it gets updated the new g is running. 3809 // 3810 // When switching from a user g to a system g, LR is not considered live, 3811 // so the update only affects g, SP, and PC. Since PC must be last, 3812 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3813 // (2) both g and SP are updated, and (3) SP alone is updated. 3814 // If SP or g alone is updated, we can detect the partial transition by checking 3815 // whether the SP is within g's stack bounds. (We could also require that SP 3816 // be changed only after g, but the stack bounds check is needed by other 3817 // cases, so there is no need to impose an additional requirement.) 3818 // 3819 // There is one exceptional transition to a system g, not in ordinary execution. 3820 // When a signal arrives, the operating system starts the signal handler running 3821 // with an updated PC and SP. The g is updated last, at the beginning of the 3822 // handler. There are two reasons this is okay. First, until g is updated the 3823 // g and SP do not match, so the stack bounds check detects the partial transition. 3824 // Second, signal handlers currently run with signals disabled, so a profiling 3825 // signal cannot arrive during the handler. 3826 // 3827 // When switching from a system g to a user g, there are three possibilities. 3828 // 3829 // First, it may be that the g switch has no PC update, because the SP 3830 // either corresponds to a user g throughout (as in asmcgocall) 3831 // or has been arranged to look like a user g frame 3832 // (as in cgocallback_gofunc). In this case, since the entire 3833 // transition is a g+SP update, a partial transition updating just one of 3834 // those will be detected by the stack bounds check. 3835 // 3836 // Second, when returning from a signal handler, the PC and SP updates 3837 // are performed by the operating system in an atomic update, so the g 3838 // update must be done before them. The stack bounds check detects 3839 // the partial transition here, and (again) signal handlers run with signals 3840 // disabled, so a profiling signal cannot arrive then anyway. 3841 // 3842 // Third, the common case: it may be that the switch updates g, SP, and PC 3843 // separately. If the PC is within any of the functions that does this, 3844 // we don't ask for a traceback. See
the function setsSP for more about this. 3845 // 3846 // There is another apparently viable approach, recorded here in case 3847 // the "PC within setsSP function" check turns out not to be usable. 3848 // It would be possible to delay the update of either g or SP until immediately 3849 // before the PC update instruction. Then, because of the stack bounds check, 3850 // the only problematic interrupt point is just before that PC update instruction, 3851 // and the sigprof handler can detect that instruction and simulate stepping past 3852 // it in order to reach a consistent state. On ARM, the update of g must be made 3853 // in two places (in R10 and also in a TLS slot), so the delayed update would 3854 // need to be the SP update. The sigprof handler must read the instruction at 3855 // the current PC and if it was the known instruction (for example, JMP BX or 3856 // MOV R2, PC), use that other register in place of the PC value. 3857 // The biggest drawback to this solution is that it requires that we can tell 3858 // whether it's safe to read from the memory pointed at by PC. 3859 // In a correct program, we can test PC == nil and otherwise read, 3860 // but if a profiling signal happens at the instant that a program executes 3861 // a bad jump (before the program manages to handle the resulting fault) 3862 // the profiling handler could fault trying to read nonexistent memory. 3863 // 3864 // To recap, there are no constraints on the assembly being used for the 3865 // transition. We simply require that g and SP match and that the PC is not 3866 // in gogo. 3867 traceback := true 3868 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) || (mp != nil && mp.vdsoSP != 0) { 3869 traceback = false 3870 } 3871 var stk [maxCPUProfStack]uintptr 3872 n := 0 3873 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3874 cgoOff := 0 3875 // Check cgoCallersUse to make sure that we are not 3876 // interrupting other code that is fiddling with 3877 // cgoCallers. We are running in a signal handler 3878 // with all signals blocked, so we don't have to worry 3879 // about any other code interrupting us. 3880 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3881 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3882 cgoOff++ 3883 } 3884 copy(stk[:], mp.cgoCallers[:cgoOff]) 3885 mp.cgoCallers[0] = 0 3886 } 3887 3888 // Collect Go stack that leads to the cgo call. 3889 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3890 if n > 0 { 3891 n += cgoOff 3892 } 3893 } else if traceback { 3894 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3895 } 3896 3897 if n <= 0 { 3898 // Normal traceback is impossible or has failed. 3899 // See if it falls into several common cases. 3900 n = 0 3901 if (GOOS == "windows" || GOOS == "solaris" || GOOS == "illumos" || GOOS == "darwin" || GOOS == "aix") && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3902 // Libcall, i.e. runtime syscall on windows. 3903 // Collect Go stack that leads to the call. 
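// The recovery attempts in this function form a ladder: cgo callers, then a
// normal Go traceback, then the libcall site, then the VDSO site, and finally
// a synthetic two-frame stack. A hedged sketch of that control flow, with
// hypothetical helpers (not runtime functions) that each return a frame
// count, 0 on failure:
//
//	n := tryCgoTraceback()
//	if n == 0 {
//		n = tryGoTraceback()
//	}
//	if n == 0 {
//		n = tryLibcallSite()
//	}
//	if n == 0 {
//		n = tryVDSOSite()
//	}
//	if n == 0 {
//		n = syntheticFrames() // pc plus a _GC or _System frame
//	}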
3904 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3905 } 3906 if n == 0 && mp != nil && mp.vdsoSP != 0 { 3907 n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3908 } 3909 if n == 0 { 3910 // If all of the above has failed, account it against abstract "System" or "GC". 3911 n = 2 3912 if inVDSOPage(pc) { 3913 pc = funcPC(_VDSO) + sys.PCQuantum 3914 } else if pc > firstmoduledata.etext { 3915 // "ExternalCode" is better than "etext". 3916 pc = funcPC(_ExternalCode) + sys.PCQuantum 3917 } 3918 stk[0] = pc 3919 if mp.preemptoff != "" { 3920 stk[1] = funcPC(_GC) + sys.PCQuantum 3921 } else { 3922 stk[1] = funcPC(_System) + sys.PCQuantum 3923 } 3924 } 3925 } 3926 3927 if prof.hz != 0 { 3928 cpuprof.add(gp, stk[:n]) 3929 } 3930 getg().m.mallocing-- 3931 } 3932 3933 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3934 // it tries to collect a traceback into sigprofCallers. 3935 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3936 var sigprofCallers cgoCallers 3937 var sigprofCallersUse uint32 3938 3939 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3940 // and the signal handler collected a stack trace in sigprofCallers. 3941 // When this is called, sigprofCallersUse will be non-zero. 3942 // g is nil, and what we can do is very limited. 3943 //go:nosplit 3944 //go:nowritebarrierrec 3945 func sigprofNonGo() { 3946 if prof.hz != 0 { 3947 n := 0 3948 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3949 n++ 3950 } 3951 cpuprof.addNonGo(sigprofCallers[:n]) 3952 } 3953 3954 atomic.Store(&sigprofCallersUse, 0) 3955 } 3956 3957 // sigprofNonGoPC is called when a profiling signal arrived on a 3958 // non-Go thread and we have a single PC value, not a stack trace. 3959 // g is nil, and what we can do is very limited. 3960 //go:nosplit 3961 //go:nowritebarrierrec 3962 func sigprofNonGoPC(pc uintptr) { 3963 if prof.hz != 0 { 3964 stk := []uintptr{ 3965 pc, 3966 funcPC(_ExternalCode) + sys.PCQuantum, 3967 } 3968 cpuprof.addNonGo(stk) 3969 } 3970 } 3971 3972 // Reports whether a function will set the SP 3973 // to an absolute value. Important that 3974 // we don't traceback when these are at the bottom 3975 // of the stack since we can't be sure that we will 3976 // find the caller. 3977 // 3978 // If the function is not on the bottom of the stack 3979 // we assume that it will have set it up so that traceback will be consistent, 3980 // either by being a traceback terminating function 3981 // or putting one on the stack at the right offset. 3982 func setsSP(pc uintptr) bool { 3983 f := findfunc(pc) 3984 if !f.valid() { 3985 // couldn't find the function for this PC, 3986 // so assume the worst and stop traceback 3987 return true 3988 } 3989 switch f.funcID { 3990 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack: 3991 return true 3992 } 3993 return false 3994 } 3995 3996 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3997 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3998 func setcpuprofilerate(hz int32) { 3999 // Force sane arguments. 4000 if hz < 0 { 4001 hz = 0 4002 } 4003 4004 // Disable preemption, otherwise we can be rescheduled to another thread 4005 // that has profiling enabled. 4006 _g_ := getg() 4007 _g_.m.locks++ 4008 4009 // Stop profiler on this thread so that it is safe to lock prof. 
4010 // if a profiling signal came in while we had prof locked, 4011 // it would deadlock. 4012 setThreadCPUProfiler(0) 4013 4014 for !atomic.Cas(&prof.signalLock, 0, 1) { 4015 osyield() 4016 } 4017 if prof.hz != hz { 4018 setProcessCPUProfiler(hz) 4019 prof.hz = hz 4020 } 4021 atomic.Store(&prof.signalLock, 0) 4022 4023 lock(&sched.lock) 4024 sched.profilehz = hz 4025 unlock(&sched.lock) 4026 4027 if hz != 0 { 4028 setThreadCPUProfiler(hz) 4029 } 4030 4031 _g_.m.locks-- 4032 } 4033 4034 // init initializes pp, which may be a freshly allocated p or a 4035 // previously destroyed p, and transitions it to status _Pgcstop. 4036 func (pp *p) init(id int32) { 4037 pp.id = id 4038 pp.status = _Pgcstop 4039 pp.sudogcache = pp.sudogbuf[:0] 4040 for i := range pp.deferpool { 4041 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 4042 } 4043 pp.wbBuf.reset() 4044 if pp.mcache == nil { 4045 if id == 0 { 4046 if getg().m.mcache == nil { 4047 throw("missing mcache?") 4048 } 4049 pp.mcache = getg().m.mcache // bootstrap 4050 } else { 4051 pp.mcache = allocmcache() 4052 } 4053 } 4054 if raceenabled && pp.raceprocctx == 0 { 4055 if id == 0 { 4056 pp.raceprocctx = raceprocctx0 4057 raceprocctx0 = 0 // bootstrap 4058 } else { 4059 pp.raceprocctx = raceproccreate() 4060 } 4061 } 4062 } 4063 4064 // destroy releases all of the resources associated with pp and 4065 // transitions it to status _Pdead. 4066 // 4067 // sched.lock must be held and the world must be stopped. 4068 func (pp *p) destroy() { 4069 // Move all runnable goroutines to the global queue 4070 for pp.runqhead != pp.runqtail { 4071 // Pop from tail of local queue 4072 pp.runqtail-- 4073 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr() 4074 // Push onto head of global queue 4075 globrunqputhead(gp) 4076 } 4077 if pp.runnext != 0 { 4078 globrunqputhead(pp.runnext.ptr()) 4079 pp.runnext = 0 4080 } 4081 if len(pp.timers) > 0 { 4082 plocal := getg().m.p.ptr() 4083 // The world is stopped, but we acquire timersLock to 4084 // protect against sysmon calling timeSleepUntil. 4085 // This is the only case where we hold the timersLock of 4086 // more than one P, so there are no deadlock concerns. 4087 lock(&plocal.timersLock) 4088 lock(&pp.timersLock) 4089 moveTimers(plocal, pp.timers) 4090 pp.timers = nil 4091 pp.numTimers = 0 4092 pp.adjustTimers = 0 4093 pp.deletedTimers = 0 4094 atomic.Store64(&pp.timer0When, 0) 4095 unlock(&pp.timersLock) 4096 unlock(&plocal.timersLock) 4097 } 4098 // If there's a background worker, make it runnable and put 4099 // it on the global queue so it can clean itself up. 4100 if gp := pp.gcBgMarkWorker.ptr(); gp != nil { 4101 casgstatus(gp, _Gwaiting, _Grunnable) 4102 if trace.enabled { 4103 traceGoUnpark(gp, 0) 4104 } 4105 globrunqput(gp) 4106 // This assignment doesn't race because the 4107 // world is stopped. 4108 pp.gcBgMarkWorker.set(nil) 4109 } 4110 // Flush p's write barrier buffer. 4111 if gcphase != _GCoff { 4112 wbBufFlush1(pp) 4113 pp.gcw.dispose() 4114 } 4115 for i := range pp.sudogbuf { 4116 pp.sudogbuf[i] = nil 4117 } 4118 pp.sudogcache = pp.sudogbuf[:0] 4119 for i := range pp.deferpool { 4120 for j := range pp.deferpoolbuf[i] { 4121 pp.deferpoolbuf[i][j] = nil 4122 } 4123 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 4124 } 4125 systemstack(func() { 4126 for i := 0; i < pp.mspancache.len; i++ { 4127 // Safe to call since the world is stopped. 
4128 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i])) 4129 } 4130 pp.mspancache.len = 0 4131 pp.pcache.flush(&mheap_.pages) 4132 }) 4133 freemcache(pp.mcache) 4134 pp.mcache = nil 4135 gfpurge(pp) 4136 traceProcFree(pp) 4137 if raceenabled { 4138 if pp.timerRaceCtx != 0 { 4139 // The race detector code uses a callback to fetch 4140 // the proc context, so arrange for that callback 4141 // to see the right thing. 4142 // This hack only works because we are the only 4143 // thread running. 4144 mp := getg().m 4145 phold := mp.p.ptr() 4146 mp.p.set(pp) 4147 4148 racectxend(pp.timerRaceCtx) 4149 pp.timerRaceCtx = 0 4150 4151 mp.p.set(phold) 4152 } 4153 raceprocdestroy(pp.raceprocctx) 4154 pp.raceprocctx = 0 4155 } 4156 pp.gcAssistTime = 0 4157 pp.status = _Pdead 4158 } 4159 4160 // Change number of processors. The world is stopped, sched is locked. 4161 // gcworkbufs are not being modified by either the GC or 4162 // the write barrier code. 4163 // Returns list of Ps with local work, they need to be scheduled by the caller. 4164 func procresize(nprocs int32) *p { 4165 old := gomaxprocs 4166 if old < 0 || nprocs <= 0 { 4167 throw("procresize: invalid arg") 4168 } 4169 if trace.enabled { 4170 traceGomaxprocs(nprocs) 4171 } 4172 4173 // update statistics 4174 now := nanotime() 4175 if sched.procresizetime != 0 { 4176 sched.totaltime += int64(old) * (now - sched.procresizetime) 4177 } 4178 sched.procresizetime = now 4179 4180 // Grow allp if necessary. 4181 if nprocs > int32(len(allp)) { 4182 // Synchronize with retake, which could be running 4183 // concurrently since it doesn't run on a P. 4184 lock(&allpLock) 4185 if nprocs <= int32(cap(allp)) { 4186 allp = allp[:nprocs] 4187 } else { 4188 nallp := make([]*p, nprocs) 4189 // Copy everything up to allp's cap so we 4190 // never lose old allocated Ps. 4191 copy(nallp, allp[:cap(allp)]) 4192 allp = nallp 4193 } 4194 unlock(&allpLock) 4195 } 4196 4197 // initialize new P's 4198 for i := old; i < nprocs; i++ { 4199 pp := allp[i] 4200 if pp == nil { 4201 pp = new(p) 4202 } 4203 pp.init(i) 4204 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 4205 } 4206 4207 _g_ := getg() 4208 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4209 // continue to use the current P 4210 _g_.m.p.ptr().status = _Prunning 4211 _g_.m.p.ptr().mcache.prepareForSweep() 4212 } else { 4213 // release the current P and acquire allp[0]. 4214 // 4215 // We must do this before destroying our current P 4216 // because p.destroy itself has write barriers, so we 4217 // need to do that from a valid P. 4218 if _g_.m.p != 0 { 4219 if trace.enabled { 4220 // Pretend that we were descheduled 4221 // and then scheduled again to keep 4222 // the trace sane. 4223 traceGoSched() 4224 traceProcStop(_g_.m.p.ptr()) 4225 } 4226 _g_.m.p.ptr().m = 0 4227 } 4228 _g_.m.p = 0 4229 _g_.m.mcache = nil 4230 p := allp[0] 4231 p.m = 0 4232 p.status = _Pidle 4233 acquirep(p) 4234 if trace.enabled { 4235 traceGoStart() 4236 } 4237 } 4238 4239 // release resources from unused P's 4240 for i := nprocs; i < old; i++ { 4241 p := allp[i] 4242 p.destroy() 4243 // can't free P itself because it can be referenced by an M in syscall 4244 } 4245 4246 // Trim allp. 
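// Growing (above) copied allp only up to its old cap, so no previously
// allocated P could be lost; trimming below is just a reslice, and the
// destroyed Ps are deliberately not freed because an M still returning from
// a syscall may reference them. A minimal sketch of the same grow-or-trim
// discipline (resizePs is a hypothetical helper, not a runtime API):
//
//	func resizePs(ps []*p, n int) []*p {
//		if n <= cap(ps) {
//			return ps[:n] // reslice: old entries stay allocated
//		}
//		nps := make([]*p, n)
//		copy(nps, ps[:cap(ps)]) // copy up to cap so no old P is lost
//		return nps
//	}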
4247 if int32(len(allp)) != nprocs { 4248 lock(&allpLock) 4249 allp = allp[:nprocs] 4250 unlock(&allpLock) 4251 } 4252 4253 var runnablePs *p 4254 for i := nprocs - 1; i >= 0; i-- { 4255 p := allp[i] 4256 if _g_.m.p.ptr() == p { 4257 continue 4258 } 4259 p.status = _Pidle 4260 if runqempty(p) { 4261 pidleput(p) 4262 } else { 4263 p.m.set(mget()) 4264 p.link.set(runnablePs) 4265 runnablePs = p 4266 } 4267 } 4268 stealOrder.reset(uint32(nprocs)) 4269 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 4270 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 4271 return runnablePs 4272 } 4273 4274 // Associate p and the current m. 4275 // 4276 // This function is allowed to have write barriers even if the caller 4277 // isn't because it immediately acquires _p_. 4278 // 4279 //go:yeswritebarrierrec 4280 func acquirep(_p_ *p) { 4281 // Do the part that isn't allowed to have write barriers. 4282 wirep(_p_) 4283 4284 // Have p; write barriers now allowed. 4285 4286 // Perform deferred mcache flush before this P can allocate 4287 // from a potentially stale mcache. 4288 _p_.mcache.prepareForSweep() 4289 4290 if trace.enabled { 4291 traceProcStart() 4292 } 4293 } 4294 4295 // wirep is the first step of acquirep, which actually associates the 4296 // current M to _p_. This is broken out so we can disallow write 4297 // barriers for this part, since we don't yet have a P. 4298 // 4299 //go:nowritebarrierrec 4300 //go:nosplit 4301 func wirep(_p_ *p) { 4302 _g_ := getg() 4303 4304 if _g_.m.p != 0 || _g_.m.mcache != nil { 4305 throw("wirep: already in go") 4306 } 4307 if _p_.m != 0 || _p_.status != _Pidle { 4308 id := int64(0) 4309 if _p_.m != 0 { 4310 id = _p_.m.ptr().id 4311 } 4312 print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 4313 throw("wirep: invalid p state") 4314 } 4315 _g_.m.mcache = _p_.mcache 4316 _g_.m.p.set(_p_) 4317 _p_.m.set(_g_.m) 4318 _p_.status = _Prunning 4319 } 4320 4321 // Disassociate p and the current m. 4322 func releasep() *p { 4323 _g_ := getg() 4324 4325 if _g_.m.p == 0 || _g_.m.mcache == nil { 4326 throw("releasep: invalid arg") 4327 } 4328 _p_ := _g_.m.p.ptr() 4329 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 4330 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 4331 throw("releasep: invalid p state") 4332 } 4333 if trace.enabled { 4334 traceProcStop(_g_.m.p.ptr()) 4335 } 4336 _g_.m.p = 0 4337 _g_.m.mcache = nil 4338 _p_.m = 0 4339 _p_.status = _Pidle 4340 return _p_ 4341 } 4342 4343 func incidlelocked(v int32) { 4344 lock(&sched.lock) 4345 sched.nmidlelocked += v 4346 if v > 0 { 4347 checkdead() 4348 } 4349 unlock(&sched.lock) 4350 } 4351 4352 // Check for deadlock situation. 4353 // The check is based on number of running M's, if 0 -> deadlock. 4354 // sched.lock must be held. 4355 func checkdead() { 4356 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 4357 // there are no running goroutines. The calling program is 4358 // assumed to be running. 4359 if islibrary || isarchive { 4360 return 4361 } 4362 4363 // If we are dying because of a signal caught on an already idle thread, 4364 // freezetheworld will cause all running threads to block. 4365 // And runtime will essentially enter into deadlock state, 4366 // except that there is a thread that will call exit soon. 
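// Worked example of the accounting done below: with mcount()=5 Ms of which
// nmidle=3 are idle, nmidlelocked=1 is idle-and-locked, and nmsys=1 is a
// system M, run = 5-3-1-1 = 0. Unless an extra M raises run0 to 1, no
// thread is left that could create work, so the Gs are inspected and a
// deadlock may be declared.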
4367 if panicking > 0 { 4368 return 4369 } 4370 4371 // If we are not running under cgo, but we have an extra M, then account 4372 // for it. (It is possible to have an extra M on Windows without cgo to 4373 // accommodate callbacks created by syscall.NewCallback. See issue #6751 4374 // for details.) 4375 var run0 int32 4376 if !iscgo && cgoHasExtraM { 4377 mp := lockextra(true) 4378 haveExtraM := extraMCount > 0 4379 unlockextra(mp) 4380 if haveExtraM { 4381 run0 = 1 4382 } 4383 } 4384 4385 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 4386 if run > run0 { 4387 return 4388 } 4389 if run < 0 { 4390 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 4391 throw("checkdead: inconsistent counts") 4392 } 4393 4394 grunning := 0 4395 lock(&allglock) 4396 for i := 0; i < len(allgs); i++ { 4397 gp := allgs[i] 4398 if isSystemGoroutine(gp, false) { 4399 continue 4400 } 4401 s := readgstatus(gp) 4402 switch s &^ _Gscan { 4403 case _Gwaiting, 4404 _Gpreempted: 4405 grunning++ 4406 case _Grunnable, 4407 _Grunning, 4408 _Gsyscall: 4409 unlock(&allglock) 4410 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 4411 throw("checkdead: runnable g") 4412 } 4413 } 4414 unlock(&allglock) 4415 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 4416 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 4417 throw("no goroutines (main called runtime.Goexit) - deadlock!") 4418 } 4419 4420 // Maybe jump time forward for playground. 4421 if faketime != 0 { 4422 when, _p_ := timeSleepUntil() 4423 if _p_ != nil { 4424 faketime = when 4425 for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link { 4426 if (*pp).ptr() == _p_ { 4427 *pp = _p_.link 4428 break 4429 } 4430 } 4431 mp := mget() 4432 if mp == nil { 4433 // There should always be a free M since 4434 // nothing is running. 4435 throw("checkdead: no m for timer") 4436 } 4437 mp.nextp.set(_p_) 4438 notewakeup(&mp.park) 4439 return 4440 } 4441 } 4442 4443 // There are no goroutines running, so we can look at the P's. 4444 for _, _p_ := range allp { 4445 if len(_p_.timers) > 0 { 4446 return 4447 } 4448 } 4449 4450 getg().m.throwing = -1 // do not dump full stacks 4451 unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang 4452 throw("all goroutines are asleep - deadlock!") 4453 } 4454 4455 // forcegcperiod is the maximum time in nanoseconds between garbage 4456 // collections. If we go this long without a garbage collection, one 4457 // is forced to run. 4458 // 4459 // This is a variable for testing purposes. It normally doesn't change. 4460 var forcegcperiod int64 = 2 * 60 * 1e9 4461 4462 // Always runs without a P, so write barriers are not allowed. 4463 // 4464 //go:nowritebarrierrec 4465 func sysmon() { 4466 lock(&sched.lock) 4467 sched.nmsys++ 4468 checkdead() 4469 unlock(&sched.lock) 4470 4471 lasttrace := int64(0) 4472 idle := 0 // how many cycles in succession we had not woken anybody up 4473 delay := uint32(0) 4474 for { 4475 if idle == 0 { // start with 20us sleep... 4476 delay = 20 4477 } else if idle > 50 { // start doubling the sleep after 1ms...
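// Illustration of the resulting backoff, worked from the constants in this
// loop: delay is 20us for the first 50 idle cycles, then doubles each idle
// cycle (40, 80, 160, ... us) until the 10*1000us cap below clamps it on
// the ninth doubling, so a fully idle sysmon settles at one wakeup every
// 10ms.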
4478 delay *= 2 4479 } 4480 if delay > 10*1000 { // up to 10ms 4481 delay = 10 * 1000 4482 } 4483 usleep(delay) 4484 now := nanotime() 4485 next, _ := timeSleepUntil() 4486 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 4487 lock(&sched.lock) 4488 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 4489 if next > now { 4490 atomic.Store(&sched.sysmonwait, 1) 4491 unlock(&sched.lock) 4492 // Make wake-up period small enough 4493 // for the sampling to be correct. 4494 sleep := forcegcperiod / 2 4495 if next-now < sleep { 4496 sleep = next - now 4497 } 4498 shouldRelax := sleep >= osRelaxMinNS 4499 if shouldRelax { 4500 osRelax(true) 4501 } 4502 notetsleep(&sched.sysmonnote, sleep) 4503 if shouldRelax { 4504 osRelax(false) 4505 } 4506 now = nanotime() 4507 next, _ = timeSleepUntil() 4508 lock(&sched.lock) 4509 atomic.Store(&sched.sysmonwait, 0) 4510 noteclear(&sched.sysmonnote) 4511 } 4512 idle = 0 4513 delay = 20 4514 } 4515 unlock(&sched.lock) 4516 } 4517 // trigger libc interceptors if needed 4518 if *cgo_yield != nil { 4519 asmcgocall(*cgo_yield, nil) 4520 } 4521 // poll network if not polled for more than 10ms 4522 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 4523 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 4524 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 4525 list := netpoll(0) // non-blocking - returns list of goroutines 4526 if !list.empty() { 4527 // Need to decrement number of idle locked M's 4528 // (pretending that one more is running) before injectglist. 4529 // Otherwise it can lead to the following situation: 4530 // injectglist grabs all P's but before it starts M's to run the P's, 4531 // another M returns from syscall, finishes running its G, 4532 // observes that there is no work to do and no other running M's 4533 // and reports deadlock. 4534 incidlelocked(-1) 4535 injectglist(&list) 4536 incidlelocked(1) 4537 } 4538 } 4539 if next < now { 4540 // There are timers that should have already run, 4541 // perhaps because there is an unpreemptible P. 4542 // Try to start an M to run them. 4543 startm(nil, false) 4544 } 4545 // retake P's blocked in syscalls 4546 // and preempt long running G's 4547 if retake(now) != 0 { 4548 idle = 0 4549 } else { 4550 idle++ 4551 } 4552 // check if we need to force a GC 4553 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 4554 lock(&forcegc.lock) 4555 forcegc.idle = 0 4556 var list gList 4557 list.push(forcegc.g) 4558 injectglist(&list) 4559 unlock(&forcegc.lock) 4560 } 4561 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 4562 lasttrace = now 4563 schedtrace(debug.scheddetail > 0) 4564 } 4565 } 4566 } 4567 4568 type sysmontick struct { 4569 schedtick uint32 4570 schedwhen int64 4571 syscalltick uint32 4572 syscallwhen int64 4573 } 4574 4575 // forcePreemptNS is the time slice given to a G before it is 4576 // preempted. 4577 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 4578 4579 func retake(now int64) uint32 { 4580 n := 0 4581 // Prevent allp slice changes. This lock will be completely 4582 // uncontended unless we're already stopping the world. 4583 lock(&allpLock) 4584 // We can't use a range loop over allp because we may 4585 // temporarily drop the allpLock. Hence, we need to re-fetch 4586 // allp each time around the loop. 
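// The loop below applies a two-observation rule: sysmon first records a P's
// current tick, and acts only if a later pass still sees the same tick after
// the deadline. Condensed sketch of the pattern (field names abbreviated,
// for illustration only):
//
//	if pd.tick != p.tick {
//		pd.tick, pd.when = p.tick, now // first sighting: record only
//	} else if pd.when+limit <= now {
//		act() // unchanged for a whole interval: preempt or retake
//	}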
4587 for i := 0; i < len(allp); i++ { 4588 _p_ := allp[i] 4589 if _p_ == nil { 4590 // This can happen if procresize has grown 4591 // allp but not yet created new Ps. 4592 continue 4593 } 4594 pd := &_p_.sysmontick 4595 s := _p_.status 4596 sysretake := false 4597 if s == _Prunning || s == _Psyscall { 4598 // Preempt G if it's running for too long. 4599 t := int64(_p_.schedtick) 4600 if int64(pd.schedtick) != t { 4601 pd.schedtick = uint32(t) 4602 pd.schedwhen = now 4603 } else if pd.schedwhen+forcePreemptNS <= now { 4604 preemptone(_p_) 4605 // In case of syscall, preemptone() doesn't 4606 // work, because there is no M wired to P. 4607 sysretake = true 4608 } 4609 } 4610 if s == _Psyscall { 4611 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 4612 t := int64(_p_.syscalltick) 4613 if !sysretake && int64(pd.syscalltick) != t { 4614 pd.syscalltick = uint32(t) 4615 pd.syscallwhen = now 4616 continue 4617 } 4618 // On the one hand we don't want to retake Ps if there is no other work to do, 4619 // but on the other hand we want to retake them eventually 4620 // because they can prevent the sysmon thread from deep sleep. 4621 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 4622 continue 4623 } 4624 // Drop allpLock so we can take sched.lock. 4625 unlock(&allpLock) 4626 // Need to decrement number of idle locked M's 4627 // (pretending that one more is running) before the CAS. 4628 // Otherwise the M from which we retake can exit the syscall, 4629 // increment nmidle and report deadlock. 4630 incidlelocked(-1) 4631 if atomic.Cas(&_p_.status, s, _Pidle) { 4632 if trace.enabled { 4633 traceGoSysBlock(_p_) 4634 traceProcStop(_p_) 4635 } 4636 n++ 4637 _p_.syscalltick++ 4638 handoffp(_p_) 4639 } 4640 incidlelocked(1) 4641 lock(&allpLock) 4642 } 4643 } 4644 unlock(&allpLock) 4645 return uint32(n) 4646 } 4647 4648 // Tell all goroutines that they have been preempted and they should stop. 4649 // This function is purely best-effort. It can fail to inform a goroutine if a 4650 // processor just started running it. 4651 // No locks need to be held. 4652 // Returns true if preemption request was issued to at least one goroutine. 4653 func preemptall() bool { 4654 res := false 4655 for _, _p_ := range allp { 4656 if _p_.status != _Prunning { 4657 continue 4658 } 4659 if preemptone(_p_) { 4660 res = true 4661 } 4662 } 4663 return res 4664 } 4665 4666 // Tell the goroutine running on processor P to stop. 4667 // This function is purely best-effort. It can incorrectly fail to inform the 4668 // goroutine. It can inform the wrong goroutine. Even if it informs the 4669 // correct goroutine, that goroutine might ignore the request if it is 4670 // simultaneously executing newstack. 4671 // No lock needs to be held. 4672 // Returns true if preemption request was issued. 4673 // The actual preemption will happen at some point in the future 4674 // and will be indicated by the gp->status no longer being 4675 // Grunning. 4676 func preemptone(_p_ *p) bool { 4677 mp := _p_.m.ptr() 4678 if mp == nil || mp == getg().m { 4679 return false 4680 } 4681 gp := mp.curg 4682 if gp == nil || gp == mp.g0 { 4683 return false 4684 } 4685 4686 gp.preempt = true 4687 4688 // Every call in a goroutine checks for stack overflow by 4689 // comparing the current stack pointer to gp->stackguard0. 4690 // Setting gp->stackguard0 to StackPreempt folds 4691 // preemption into the normal stack overflow check.
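// Conceptually, each function prologue performs the equivalent of:
//
//	if SP < g.stackguard0 {
//		morestack() // stack growth or, when the guard is stackPreempt, preemption
//	}
//
// stackPreempt is chosen larger than any valid stack pointer, so the very
// next prologue check fails and control enters the runtime, which then
// notices gp.preempt. (A sketch of the idea, not the literal generated code.)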
4692 gp.stackguard0 = stackPreempt 4693 4694 // Request an async preemption of this P. 4695 if preemptMSupported && debug.asyncpreemptoff == 0 { 4696 _p_.preempt = true 4697 preemptM(mp) 4698 } 4699 4700 return true 4701 } 4702 4703 var starttime int64 4704 4705 func schedtrace(detailed bool) { 4706 now := nanotime() 4707 if starttime == 0 { 4708 starttime = now 4709 } 4710 4711 lock(&sched.lock) 4712 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 4713 if detailed { 4714 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 4715 } 4716 // We must be careful while reading data from P's, M's and G's. 4717 // Even if we hold schedlock, most data can be changed concurrently. 4718 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 4719 for i, _p_ := range allp { 4720 mp := _p_.m.ptr() 4721 h := atomic.Load(&_p_.runqhead) 4722 t := atomic.Load(&_p_.runqtail) 4723 if detailed { 4724 id := int64(-1) 4725 if mp != nil { 4726 id = mp.id 4727 } 4728 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gFree.n, " timerslen=", len(_p_.timers), "\n") 4729 } else { 4730 // In non-detailed mode format lengths of per-P run queues as: 4731 // [len1 len2 len3 len4] 4732 print(" ") 4733 if i == 0 { 4734 print("[") 4735 } 4736 print(t - h) 4737 if i == len(allp)-1 { 4738 print("]\n") 4739 } 4740 } 4741 } 4742 4743 if !detailed { 4744 unlock(&sched.lock) 4745 return 4746 } 4747 4748 for mp := allm; mp != nil; mp = mp.alllink { 4749 _p_ := mp.p.ptr() 4750 gp := mp.curg 4751 lockedg := mp.lockedg.ptr() 4752 id1 := int32(-1) 4753 if _p_ != nil { 4754 id1 = _p_.id 4755 } 4756 id2 := int64(-1) 4757 if gp != nil { 4758 id2 = gp.goid 4759 } 4760 id3 := int64(-1) 4761 if lockedg != nil { 4762 id3 = lockedg.goid 4763 } 4764 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 4765 } 4766 4767 lock(&allglock) 4768 for gi := 0; gi < len(allgs); gi++ { 4769 gp := allgs[gi] 4770 mp := gp.m 4771 lockedm := gp.lockedm.ptr() 4772 id1 := int64(-1) 4773 if mp != nil { 4774 id1 = mp.id 4775 } 4776 id2 := int64(-1) 4777 if lockedm != nil { 4778 id2 = lockedm.id 4779 } 4780 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=", id1, " lockedm=", id2, "\n") 4781 } 4782 unlock(&allglock) 4783 unlock(&sched.lock) 4784 } 4785 4786 // schedEnableUser enables or disables the scheduling of user 4787 // goroutines. 4788 // 4789 // This does not stop already running user goroutines, so the caller 4790 // should first stop the world when disabling user goroutines. 
4791 func schedEnableUser(enable bool) { 4792 lock(&sched.lock) 4793 if sched.disable.user == !enable { 4794 unlock(&sched.lock) 4795 return 4796 } 4797 sched.disable.user = !enable 4798 if enable { 4799 n := sched.disable.n 4800 sched.disable.n = 0 4801 globrunqputbatch(&sched.disable.runnable, n) 4802 unlock(&sched.lock) 4803 for ; n != 0 && sched.npidle != 0; n-- { 4804 startm(nil, false) 4805 } 4806 } else { 4807 unlock(&sched.lock) 4808 } 4809 } 4810 4811 // schedEnabled reports whether gp should be scheduled. It returns 4812 // false if scheduling of gp is disabled. 4813 func schedEnabled(gp *g) bool { 4814 if sched.disable.user { 4815 return isSystemGoroutine(gp, true) 4816 } 4817 return true 4818 } 4819 4820 // Put mp on midle list. 4821 // Sched must be locked. 4822 // May run during STW, so write barriers are not allowed. 4823 //go:nowritebarrierrec 4824 func mput(mp *m) { 4825 mp.schedlink = sched.midle 4826 sched.midle.set(mp) 4827 sched.nmidle++ 4828 checkdead() 4829 } 4830 4831 // Try to get an m from midle list. 4832 // Sched must be locked. 4833 // May run during STW, so write barriers are not allowed. 4834 //go:nowritebarrierrec 4835 func mget() *m { 4836 mp := sched.midle.ptr() 4837 if mp != nil { 4838 sched.midle = mp.schedlink 4839 sched.nmidle-- 4840 } 4841 return mp 4842 } 4843 4844 // Put gp on the global runnable queue. 4845 // Sched must be locked. 4846 // May run during STW, so write barriers are not allowed. 4847 //go:nowritebarrierrec 4848 func globrunqput(gp *g) { 4849 sched.runq.pushBack(gp) 4850 sched.runqsize++ 4851 } 4852 4853 // Put gp at the head of the global runnable queue. 4854 // Sched must be locked. 4855 // May run during STW, so write barriers are not allowed. 4856 //go:nowritebarrierrec 4857 func globrunqputhead(gp *g) { 4858 sched.runq.push(gp) 4859 sched.runqsize++ 4860 } 4861 4862 // Put a batch of runnable goroutines on the global runnable queue. 4863 // This clears *batch. 4864 // Sched must be locked. 4865 func globrunqputbatch(batch *gQueue, n int32) { 4866 sched.runq.pushBackAll(*batch) 4867 sched.runqsize += n 4868 *batch = gQueue{} 4869 } 4870 4871 // Try to get a batch of G's from the global runnable queue. 4872 // Sched must be locked. 4873 func globrunqget(_p_ *p, max int32) *g { 4874 if sched.runqsize == 0 { 4875 return nil 4876 } 4877 4878 n := sched.runqsize/gomaxprocs + 1 4879 if n > sched.runqsize { 4880 n = sched.runqsize 4881 } 4882 if max > 0 && n > max { 4883 n = max 4884 } 4885 if n > int32(len(_p_.runq))/2 { 4886 n = int32(len(_p_.runq)) / 2 4887 } 4888 4889 sched.runqsize -= n 4890 4891 gp := sched.runq.pop() 4892 n-- 4893 for ; n > 0; n-- { 4894 gp1 := sched.runq.pop() 4895 runqput(_p_, gp1, false) 4896 } 4897 return gp 4898 } 4899 4900 // Put p on the _Pidle list. 4901 // Sched must be locked. 4902 // May run during STW, so write barriers are not allowed. 4903 //go:nowritebarrierrec 4904 func pidleput(_p_ *p) { 4905 if !runqempty(_p_) { 4906 throw("pidleput: P has non-empty run queue") 4907 } 4908 _p_.link = sched.pidle 4909 sched.pidle.set(_p_) 4910 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 4911 } 4912 4913 // Try to get a p from the _Pidle list. 4914 // Sched must be locked. 4915 // May run during STW, so write barriers are not allowed.
4916 //go:nowritebarrierrec 4917 func pidleget() *p { 4918 _p_ := sched.pidle.ptr() 4919 if _p_ != nil { 4920 sched.pidle = _p_.link 4921 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 4922 } 4923 return _p_ 4924 } 4925 4926 // runqempty reports whether _p_ has no Gs on its local run queue. 4927 // It never returns true spuriously. 4928 func runqempty(_p_ *p) bool { 4929 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, 4930 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. 4931 // Simply observing that runqhead == runqtail and then observing that runqnext == nil 4932 // does not mean the queue is empty. 4933 for { 4934 head := atomic.Load(&_p_.runqhead) 4935 tail := atomic.Load(&_p_.runqtail) 4936 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) 4937 if tail == atomic.Load(&_p_.runqtail) { 4938 return head == tail && runnext == 0 4939 } 4940 } 4941 } 4942 4943 // To shake out latent assumptions about scheduling order, 4944 // we introduce some randomness into scheduling decisions 4945 // when running with the race detector. 4946 // The need for this was made obvious by changing the 4947 // (deterministic) scheduling order in Go 1.5 and breaking 4948 // many poorly-written tests. 4949 // With the randomness here, as long as the tests pass 4950 // consistently with -race, they shouldn't have latent scheduling 4951 // assumptions. 4952 const randomizeScheduler = raceenabled 4953 4954 // runqput tries to put g on the local runnable queue. 4955 // If next is false, runqput adds g to the tail of the runnable queue. 4956 // If next is true, runqput puts g in the _p_.runnext slot. 4957 // If the run queue is full, runqput puts g on the global queue. 4958 // Executed only by the owner P. 4959 func runqput(_p_ *p, gp *g, next bool) { 4960 if randomizeScheduler && next && fastrand()%2 == 0 { 4961 next = false 4962 } 4963 4964 if next { 4965 retryNext: 4966 oldnext := _p_.runnext 4967 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 4968 goto retryNext 4969 } 4970 if oldnext == 0 { 4971 return 4972 } 4973 // Kick the old runnext out to the regular run queue. 4974 gp = oldnext.ptr() 4975 } 4976 4977 retry: 4978 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 4979 t := _p_.runqtail 4980 if t-h < uint32(len(_p_.runq)) { 4981 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4982 atomic.StoreRel(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4983 return 4984 } 4985 if runqputslow(_p_, gp, h, t) { 4986 return 4987 } 4988 // the queue is not full, now the put above must succeed 4989 goto retry 4990 } 4991 4992 // Put g and a batch of work from local runnable queue on global queue. 4993 // Executed only by the owner P. 4994 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4995 var batch [len(_p_.runq)/2 + 1]*g 4996 4997 // First, grab a batch from local queue. 4998 n := t - h 4999 n = n / 2 5000 if n != uint32(len(_p_.runq)/2) { 5001 throw("runqputslow: queue is not full") 5002 } 5003 for i := uint32(0); i < n; i++ { 5004 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 5005 } 5006 if !atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 5007 return false 5008 } 5009 batch[n] = gp 5010 5011 if randomizeScheduler { 5012 for i := uint32(1); i <= n; i++ { 5013 j := fastrandn(i + 1) 5014 batch[i], batch[j] = batch[j], batch[i] 5015 } 5016 } 5017 5018 // Link the goroutines.
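// Linking the batch into a single gQueue first means sched.lock below is
// taken exactly once for all n+1 goroutines; pushing them individually with
// globrunqput would take the lock n+1 times (129 lock/unlock pairs for a
// full 256-entry run queue instead of 1).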
5019 for i := uint32(0); i < n; i++ { 5020 batch[i].schedlink.set(batch[i+1]) 5021 } 5022 var q gQueue 5023 q.head.set(batch[0]) 5024 q.tail.set(batch[n]) 5025 5026 // Now put the batch on global queue. 5027 lock(&sched.lock) 5028 globrunqputbatch(&q, int32(n+1)) 5029 unlock(&sched.lock) 5030 return true 5031 } 5032 5033 // Get g from local runnable queue. 5034 // If inheritTime is true, gp should inherit the remaining time in the 5035 // current time slice. Otherwise, it should start a new time slice. 5036 // Executed only by the owner P. 5037 func runqget(_p_ *p) (gp *g, inheritTime bool) { 5038 // If there's a runnext, it's the next G to run. 5039 for { 5040 next := _p_.runnext 5041 if next == 0 { 5042 break 5043 } 5044 if _p_.runnext.cas(next, 0) { 5045 return next.ptr(), true 5046 } 5047 } 5048 5049 for { 5050 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 5051 t := _p_.runqtail 5052 if t == h { 5053 return nil, false 5054 } 5055 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 5056 if atomic.CasRel(&_p_.runqhead, h, h+1) { // cas-release, commits consume 5057 return gp, false 5058 } 5059 } 5060 } 5061 5062 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 5063 // Batch is a ring buffer starting at batchHead. 5064 // Returns number of grabbed goroutines. 5065 // Can be executed by any P. 5066 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 5067 for { 5068 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with other consumers 5069 t := atomic.LoadAcq(&_p_.runqtail) // load-acquire, synchronize with the producer 5070 n := t - h 5071 n = n - n/2 5072 if n == 0 { 5073 if stealRunNextG { 5074 // Try to steal from _p_.runnext. 5075 if next := _p_.runnext; next != 0 { 5076 if _p_.status == _Prunning { 5077 // Sleep to ensure that _p_ isn't about to run the g 5078 // we are about to steal. 5079 // The important use case here is when the g running 5080 // on _p_ ready()s another g and then almost 5081 // immediately blocks. Instead of stealing runnext 5082 // in this window, back off to give _p_ a chance to 5083 // schedule runnext. This will avoid thrashing gs 5084 // between different Ps. 5085 // A sync chan send/recv takes ~50ns as of time of 5086 // writing, so 3us gives ~50x overshoot. 5087 if GOOS != "windows" { 5088 usleep(3) 5089 } else { 5090 // On windows system timer granularity is 5091 // 1-15ms, which is way too much for this 5092 // optimization. So just yield. 5093 osyield() 5094 } 5095 } 5096 if !_p_.runnext.cas(next, 0) { 5097 continue 5098 } 5099 batch[batchHead%uint32(len(batch))] = next 5100 return 1 5101 } 5102 } 5103 return 0 5104 } 5105 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 5106 continue 5107 } 5108 for i := uint32(0); i < n; i++ { 5109 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 5110 batch[(batchHead+i)%uint32(len(batch))] = g 5111 } 5112 if atomic.CasRel(&_p_.runqhead, h, h+n) { // cas-release, commits consume 5113 return n 5114 } 5115 } 5116 } 5117 5118 // Steal half of elements from local runnable queue of p2 5119 // and put onto local runnable queue of p. 5120 // Returns one of the stolen elements (or nil if failed). 
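// For example, worked from runqgrab's n = n - n/2 rule: if p2 has 7
// runnable Gs, 4 of them are grabbed; runqsteal returns one of those to run
// immediately and publishes the other 3 on _p_'s local queue.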
5121 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 5122 t := _p_.runqtail 5123 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 5124 if n == 0 { 5125 return nil 5126 } 5127 n-- 5128 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 5129 if n == 0 { 5130 return gp 5131 } 5132 h := atomic.LoadAcq(&_p_.runqhead) // load-acquire, synchronize with consumers 5133 if t-h+n >= uint32(len(_p_.runq)) { 5134 throw("runqsteal: runq overflow") 5135 } 5136 atomic.StoreRel(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 5137 return gp 5138 } 5139 5140 // A gQueue is a queue of Gs linked through g.schedlink. A G can only 5141 // be on one gQueue or gList at a time. 5142 type gQueue struct { 5143 head guintptr 5144 tail guintptr 5145 } 5146 5147 // empty reports whether q is empty. 5148 func (q *gQueue) empty() bool { 5149 return q.head == 0 5150 } 5151 5152 // push adds gp to the head of q. 5153 func (q *gQueue) push(gp *g) { 5154 gp.schedlink = q.head 5155 q.head.set(gp) 5156 if q.tail == 0 { 5157 q.tail.set(gp) 5158 } 5159 } 5160 5161 // pushBack adds gp to the tail of q. 5162 func (q *gQueue) pushBack(gp *g) { 5163 gp.schedlink = 0 5164 if q.tail != 0 { 5165 q.tail.ptr().schedlink.set(gp) 5166 } else { 5167 q.head.set(gp) 5168 } 5169 q.tail.set(gp) 5170 } 5171 5172 // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must 5173 // not be used. 5174 func (q *gQueue) pushBackAll(q2 gQueue) { 5175 if q2.tail == 0 { 5176 return 5177 } 5178 q2.tail.ptr().schedlink = 0 5179 if q.tail != 0 { 5180 q.tail.ptr().schedlink = q2.head 5181 } else { 5182 q.head = q2.head 5183 } 5184 q.tail = q2.tail 5185 } 5186 5187 // pop removes and returns the head of queue q. It returns nil if 5188 // q is empty. 5189 func (q *gQueue) pop() *g { 5190 gp := q.head.ptr() 5191 if gp != nil { 5192 q.head = gp.schedlink 5193 if q.head == 0 { 5194 q.tail = 0 5195 } 5196 } 5197 return gp 5198 } 5199 5200 // popList takes all Gs in q and returns them as a gList. 5201 func (q *gQueue) popList() gList { 5202 stack := gList{q.head} 5203 *q = gQueue{} 5204 return stack 5205 } 5206 5207 // A gList is a list of Gs linked through g.schedlink. A G can only be 5208 // on one gQueue or gList at a time. 5209 type gList struct { 5210 head guintptr 5211 } 5212 5213 // empty reports whether l is empty. 5214 func (l *gList) empty() bool { 5215 return l.head == 0 5216 } 5217 5218 // push adds gp to the head of l. 5219 func (l *gList) push(gp *g) { 5220 gp.schedlink = l.head 5221 l.head.set(gp) 5222 } 5223 5224 // pushAll prepends all Gs in q to l. 5225 func (l *gList) pushAll(q gQueue) { 5226 if !q.empty() { 5227 q.tail.ptr().schedlink = l.head 5228 l.head = q.head 5229 } 5230 } 5231 5232 // pop removes and returns the head of l. If l is empty, it returns nil.
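// Usage sketch for these list types (g1 and g2 are assumed *g values, for
// illustration only):
//
//	var l gList
//	l.push(g1)
//	l.push(g2)
//	_ = l.pop() // returns g2 first: push/pop on a gList is LIFO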
5233 func (l *gList) pop() *g { 5234 gp := l.head.ptr() 5235 if gp != nil { 5236 l.head = gp.schedlink 5237 } 5238 return gp 5239 } 5240 5241 //go:linkname setMaxThreads runtime/debug.setMaxThreads 5242 func setMaxThreads(in int) (out int) { 5243 lock(&sched.lock) 5244 out = int(sched.maxmcount) 5245 if in > 0x7fffffff { // MaxInt32 5246 sched.maxmcount = 0x7fffffff 5247 } else { 5248 sched.maxmcount = int32(in) 5249 } 5250 checkmcount() 5251 unlock(&sched.lock) 5252 return 5253 } 5254 5255 func haveexperiment(name string) bool { 5256 if name == "framepointer" { 5257 return framepointer_enabled // set by linker 5258 } 5259 x := sys.Goexperiment 5260 for x != "" { 5261 xname := "" 5262 i := index(x, ",") 5263 if i < 0 { 5264 xname, x = x, "" 5265 } else { 5266 xname, x = x[:i], x[i+1:] 5267 } 5268 if xname == name { 5269 return true 5270 } 5271 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 5272 return false 5273 } 5274 } 5275 return false 5276 } 5277 5278 //go:nosplit 5279 func procPin() int { 5280 _g_ := getg() 5281 mp := _g_.m 5282 5283 mp.locks++ 5284 return int(mp.p.ptr().id) 5285 } 5286 5287 //go:nosplit 5288 func procUnpin() { 5289 _g_ := getg() 5290 _g_.m.locks-- 5291 } 5292 5293 //go:linkname sync_runtime_procPin sync.runtime_procPin 5294 //go:nosplit 5295 func sync_runtime_procPin() int { 5296 return procPin() 5297 } 5298 5299 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 5300 //go:nosplit 5301 func sync_runtime_procUnpin() { 5302 procUnpin() 5303 } 5304 5305 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 5306 //go:nosplit 5307 func sync_atomic_runtime_procPin() int { 5308 return procPin() 5309 } 5310 5311 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 5312 //go:nosplit 5313 func sync_atomic_runtime_procUnpin() { 5314 procUnpin() 5315 } 5316 5317 // Active spinning for sync.Mutex. 5318 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 5319 //go:nosplit 5320 func sync_runtime_canSpin(i int) bool { 5321 // sync.Mutex is cooperative, so we are conservative with spinning. 5322 // Spin only a few times and only if running on a multicore machine and 5323 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 5324 // As opposed to runtime mutex we don't do passive spinning here, 5325 // because there can be work on global runq or on other Ps. 5326 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 5327 return false 5328 } 5329 if p := getg().m.p.ptr(); !runqempty(p) { 5330 return false 5331 } 5332 return true 5333 } 5334 5335 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 5336 //go:nosplit 5337 func sync_runtime_doSpin() { 5338 procyield(active_spin_cnt) 5339 } 5340 5341 var stealOrder randomOrder 5342 5343 // randomOrder/randomEnum are helper types for randomized work stealing. 5344 // They allow enumerating all Ps in different pseudo-random orders without repetitions. 5345 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS 5346 // are coprime, then the sequence of (i + X) % GOMAXPROCS gives the required enumeration.
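// For example, with count = 6 the coprimes are {1, 5}; an enumeration
// starting at pos = 2 with inc = 5 visits 2, 1, 0, 5, 4, 3, every P exactly
// once with no repeats, precisely because gcd(5, 6) = 1.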
5347 type randomOrder struct { 5348 count uint32 5349 coprimes []uint32 5350 } 5351 5352 type randomEnum struct { 5353 i uint32 5354 count uint32 5355 pos uint32 5356 inc uint32 5357 } 5358 5359 func (ord *randomOrder) reset(count uint32) { 5360 ord.count = count 5361 ord.coprimes = ord.coprimes[:0] 5362 for i := uint32(1); i <= count; i++ { 5363 if gcd(i, count) == 1 { 5364 ord.coprimes = append(ord.coprimes, i) 5365 } 5366 } 5367 } 5368 5369 func (ord *randomOrder) start(i uint32) randomEnum { 5370 return randomEnum{ 5371 count: ord.count, 5372 pos: i % ord.count, 5373 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 5374 } 5375 } 5376 5377 func (enum *randomEnum) done() bool { 5378 return enum.i == enum.count 5379 } 5380 5381 func (enum *randomEnum) next() { 5382 enum.i++ 5383 enum.pos = (enum.pos + enum.inc) % enum.count 5384 } 5385 5386 func (enum *randomEnum) position() uint32 { 5387 return enum.pos 5388 } 5389 5390 func gcd(a, b uint32) uint32 { 5391 for b != 0 { 5392 a, b = b, a%b 5393 } 5394 return a 5395 } 5396 5397 // An initTask represents the set of initializations that need to be done for a package. 5398 // Keep in sync with ../../test/initempty.go:initTask 5399 type initTask struct { 5400 // TODO: pack the first 3 fields more tightly? 5401 state uintptr // 0 = uninitialized, 1 = in progress, 2 = done 5402 ndeps uintptr 5403 nfns uintptr 5404 // followed by ndeps instances of an *initTask, one per package depended on 5405 // followed by nfns pcs, one per init function to run 5406 } 5407 5408 func doInit(t *initTask) { 5409 switch t.state { 5410 case 2: // fully initialized 5411 return 5412 case 1: // initialization in progress 5413 throw("recursive call during initialization - linker skew") 5414 default: // not initialized yet 5415 t.state = 1 // initialization in progress 5416 for i := uintptr(0); i < t.ndeps; i++ { 5417 p := add(unsafe.Pointer(t), (3+i)*sys.PtrSize) 5418 t2 := *(**initTask)(p) 5419 doInit(t2) 5420 } 5421 for i := uintptr(0); i < t.nfns; i++ { 5422 p := add(unsafe.Pointer(t), (3+t.ndeps+i)*sys.PtrSize) 5423 f := *(*func())(unsafe.Pointer(&p)) 5424 f() 5425 } 5426 t.state = 2 // initialization done 5427 } 5428 }
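// For reference, the layout walked by doInit above: an initTask is three
// uintptr header words followed by the two variable-length arrays, so
//
//	offset 0*PtrSize           state
//	offset 1*PtrSize           ndeps
//	offset 2*PtrSize           nfns
//	offset (3+i)*PtrSize       *initTask of dependency i (i < ndeps)
//	offset (3+ndeps+j)*PtrSize pc of init function j (j < nfns)
//
// which matches the (3+i)*sys.PtrSize and (3+t.ndeps+i)*sys.PtrSize pointer
// arithmetic in doInit.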