github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/proc.go (about)

1  // Copyright 2014 The Go Authors. All rights reserved.
2  // Use of this source code is governed by a BSD-style
3  // license that can be found in the LICENSE file.
4
5  package runtime
6
7  import (
8  	"runtime/internal/atomic"
9  	"runtime/internal/sys"
10 	"unsafe"
11 )
12
13 var buildVersion = sys.TheVersion
14
15 // Goroutine scheduler
16 // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
17 // (annotation) That is, it hands goroutines that are ready to run to worker threads (OS threads).
18 // https://povilasv.me/go-scheduler/#
19 // https://news.ycombinator.com/item?id=12459841
20 // https://tonybai.com/2017/06/23/an-intro-about-goroutine-scheduler/
21 //
22 // The main concepts are:
23 // G - goroutine.
24 // M - worker thread, or machine.
25 // P - processor, a resource that is required to execute Go code.
26 //     M must have an associated P to execute Go code, however it can be
27 //     blocked or in a syscall w/o an associated P.
28 // The M is the active party: to obtain Gs to execute, it must go through a P.
29 // An M has to be bound to a P before it can run a G, so GOMAXPROCS bounds the maximum parallelism at any moment (the number of Ps),
30 // but not the number of Ms: some OS threads may be blocked in system calls, so the number of Ms is usually > GOMAXPROCS.
31
32 // When a G makes a blocking system call, the M running it blocks as well (some OS thread
33 // must block waiting for the call to return, because many system calls cannot be polled, e.g. creating a file). The P is released so it can bind another
34 // M, and the remaining Gs on that P keep being executed.
35
36 // When a G does a network send/recv, the G blocks and its fd is handed to the net poller, an OS thread that waits
37 // for a whole set of fds to become ready; the G blocks, but the M that was running it does not (the net poller waits for the fd on its behalf), so the M can keep
38 // running the other Gs on that P (if the P has no other Gs, it picks some other P and steals half of its Gs; if there is
39 // no runnable G at all, the M unbinds from the P and goes to sleep).
40
41 // Blocking channel operations are handled much like the network case.
42
43 //
44 // Design doc at https://golang.org/s/go11sched.
45
46 // Worker thread parking/unparking.
47 // We need to balance between keeping enough running worker threads to utilize
48 // available hardware parallelism and parking excessive running worker threads
49 // to conserve CPU resources and power. This is not simple for two reasons:
50 // (1) scheduler state is intentionally distributed (in particular, per-P work
51 // queues), so it is not possible to compute global predicates on fast paths;
52 // (2) for optimal thread management we would need to know the future (don't park
53 // a worker thread when a new goroutine will be readied in near future).
54 //
55 // Three rejected approaches that would work badly:
56 // 1. Centralize all scheduler state (would inhibit scalability).
57 // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
58 // is a spare P, unpark a thread and hand it the P and the goroutine.
59 // This would lead to thread state thrashing, as the thread that readied the
60 // goroutine can be out of work the very next moment, and then we would need to park it.
61 // Also, it would destroy locality of computation as we want to preserve
62 // dependent goroutines on the same thread; and introduce additional latency.
63 // 3. Unpark an additional thread whenever we ready a goroutine and there is an
64 // idle P, but don't do handoff. This would lead to excessive thread parking/
65 // unparking as the additional threads will instantly park without discovering
66 // any work to do.
67 //
68 // The current approach:
69 // We unpark an additional thread when we ready a goroutine if there is an
70 // idle P and there are no "spinning" worker threads. A worker thread is considered
71 // spinning if it is out of local work and did not find work in global run queue/
72 // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
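// Aside (annotator): before the parking/unparking notes continue, a minimal, self-contained
// illustration of the GOMAXPROCS point above. This is ordinary user code, not runtime code,
// and uses only the public runtime API:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		// GOMAXPROCS(0) reports the current number of Ps without changing it.
//		fmt.Println("Ps (max goroutines running Go code in parallel):", runtime.GOMAXPROCS(0))
//		// The number of Gs is unrelated to the number of Ps or Ms.
//		fmt.Println("Gs:", runtime.NumGoroutine())
//		// Ms are created on demand; a goroutine blocked in a system call keeps its M
//		// but releases its P, so the process can hold more OS threads than GOMAXPROCS.
//	}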
73 // Threads unparked this way are also considered spinning; we don't do goroutine 74 // handoff so such threads are out of work initially. Spinning threads do some 75 // spinning looking for work in per-P run queues before parking. If a spinning 76 // thread finds work it takes itself out of the spinning state and proceeds to 77 // execution. If it does not find work it takes itself out of the spinning state 78 // and then parks. 79 // If there is at least one spinning thread (sched.nmspinning>1), we don't unpark 80 // new threads when readying goroutines. To compensate for that, if the last spinning 81 // thread finds work and stops spinning, it must unpark a new spinning thread. 82 // This approach smooths out unjustified spikes of thread unparking, 83 // but at the same time guarantees eventual maximal CPU parallelism utilization. 84 // 85 // The main implementation complication is that we need to be very careful during 86 // spinning->non-spinning thread transition. This transition can race with submission 87 // of a new goroutine, and either one part or another needs to unpark another worker 88 // thread. If they both fail to do that, we can end up with semi-persistent CPU 89 // underutilization. The general pattern for goroutine readying is: submit a goroutine 90 // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning. 91 // The general pattern for spinning->non-spinning transition is: decrement nmspinning, 92 // #StoreLoad-style memory barrier, check all per-P work queues for new work. 93 // Note that all this complexity does not apply to global run queue as we are not 94 // sloppy about thread unparking when submitting to global queue. Also see comments 95 // for nmspinning manipulation. 96 97 var ( 98 m0 m 99 g0 g 100 raceprocctx0 uintptr 101 ) 102 103 //go:linkname runtime_init runtime.init 104 func runtime_init() 105 106 //go:linkname main_init main.init 107 func main_init() 108 109 // main_init_done is a signal used by cgocallbackg that initialization 110 // has been completed. It is made before _cgo_notify_runtime_init_done, 111 // so all cgo calls can rely on it existing. When main_init is complete, 112 // it is closed, meaning cgocallbackg can reliably receive from it. 113 var main_init_done chan bool 114 115 //go:linkname main_main main.main 116 func main_main() 117 118 // mainStarted indicates that the main M has started. 119 var mainStarted bool 120 121 // runtimeInitTime is the nanotime() at which the runtime started. 122 var runtimeInitTime int64 123 124 // Value to use for signal mask for newly created M's. 125 var initSigmask sigset 126 127 // The main goroutine. 128 func main() { 129 g := getg() 130 131 // Racectx of m0->g0 is used only as the parent of the main goroutine. 132 // It must not be used for anything else. 133 g.m.g0.racectx = 0 134 135 // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. 136 // Using decimal instead of binary GB and MB because 137 // they look nicer in the stack overflow failure message. 138 if sys.PtrSize == 8 { 139 maxstacksize = 1000000000 140 } else { 141 maxstacksize = 250000000 142 } 143 144 // Allow newproc to start new Ms. 145 mainStarted = true 146 147 systemstack(func() { 148 newm(sysmon, nil) 149 }) 150 151 // Lock the main goroutine onto this, the main OS thread, 152 // during initialization. Most programs won't care, but a few 153 // do require certain calls to be made by the main thread. 
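// For example (annotator's sketch, ordinary user code rather than runtime code): as the rest of
// this comment describes, a program can keep main.main on the main OS thread by taking the lock
// from an init function:
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// Locking before main runs preserves the lock taken by the runtime,
//		// so main.main executes on the main OS thread (needed by some C libraries).
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// ... calls that must be made from the main thread ...
//	}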
154 // Those can arrange for main.main to run in the main thread 155 // by calling runtime.LockOSThread during initialization 156 // to preserve the lock. 157 lockOSThread() 158 159 if g.m != &m0 { 160 throw("runtime.main not on m0") 161 } 162 163 runtime_init() // must be before defer 164 if nanotime() == 0 { 165 throw("nanotime returning zero") 166 } 167 168 // Defer unlock so that runtime.Goexit during init does the unlock too. 169 needUnlock := true 170 defer func() { 171 if needUnlock { 172 unlockOSThread() 173 } 174 }() 175 176 // Record when the world started. Must be after runtime_init 177 // because nanotime on some platforms depends on startNano. 178 runtimeInitTime = nanotime() 179 180 gcenable() 181 182 main_init_done = make(chan bool) 183 if iscgo { 184 if _cgo_thread_start == nil { 185 throw("_cgo_thread_start missing") 186 } 187 if GOOS != "windows" { 188 if _cgo_setenv == nil { 189 throw("_cgo_setenv missing") 190 } 191 if _cgo_unsetenv == nil { 192 throw("_cgo_unsetenv missing") 193 } 194 } 195 if _cgo_notify_runtime_init_done == nil { 196 throw("_cgo_notify_runtime_init_done missing") 197 } 198 // Start the template thread in case we enter Go from 199 // a C-created thread and need to create a new thread. 200 startTemplateThread() 201 cgocall(_cgo_notify_runtime_init_done, nil) 202 } 203 204 // main包(用户代码入口)的init函数, 编译器应该会将main包里所有的init合并到一个init里 205 fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 206 fn() 207 close(main_init_done) 208 209 needUnlock = false 210 unlockOSThread() 211 212 if isarchive || islibrary { 213 // A program compiled with -buildmode=c-archive or c-shared 214 // has a main, but it is not executed. 215 return 216 } 217 // main包里的main, 用户代码的入口 218 fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime 219 fn() 220 if raceenabled { 221 racefini() 222 } 223 224 // Make racy client program work: if panicking on 225 // another goroutine at the same time as main returns, 226 // let the other goroutine finish printing the panic trace. 227 // Once it does, it will exit. See issues 3934 and 20018. 228 if atomic.Load(&runningPanicDefers) != 0 { 229 // Running deferred functions should not take long. 230 for c := 0; c < 1000; c++ { 231 if atomic.Load(&runningPanicDefers) == 0 { 232 break 233 } 234 Gosched() 235 } 236 } 237 if atomic.Load(&panicking) != 0 { 238 gopark(nil, nil, "panicwait", traceEvGoStop, 1) 239 } 240 241 exit(0) 242 // below cannot be reached, and is weird(agreed) 243 for { 244 var x *int32 245 *x = 0 246 } 247 } 248 249 // os_beforeExit is called from os.Exit(0). 250 //go:linkname os_beforeExit os.runtime_beforeExit 251 func os_beforeExit() { 252 if raceenabled { 253 racefini() 254 } 255 } 256 257 // start forcegc helper goroutine 258 func init() { 259 go forcegchelper() 260 } 261 262 func forcegchelper() { 263 forcegc.g = getg() 264 for { 265 lock(&forcegc.lock) 266 if forcegc.idle != 0 { 267 throw("forcegc: phase error") 268 } 269 atomic.Store(&forcegc.idle, 1) 270 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1) 271 // this goroutine is explicitly resumed by sysmon 272 if debug.gctrace > 0 { 273 println("GC forced") 274 } 275 // Time-triggered, fully concurrent. 276 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()}) 277 } 278 } 279 280 //go:nosplit 281 282 // Gosched yields the processor, allowing other goroutines to run. 
It does not 283 // suspend the current goroutine, so execution resumes automatically. 284 func Gosched() { 285 mcall(gosched_m) 286 } 287 288 // goschedguarded yields the processor like gosched, but also checks 289 // for forbidden states and opts out of the yield in those cases. 290 //go:nosplit 291 func goschedguarded() { 292 mcall(goschedguarded_m) 293 } 294 295 // Puts the current goroutine into a waiting state and calls unlockf. 296 // If unlockf returns false, the goroutine is resumed. 297 // unlockf must not access this G's stack, as it may be moved between 298 // the call to gopark and the call to unlockf. 299 // 将当前goroutine设为等待状态然后调用unlockf 300 // 如果unlockf返回false, goroutine恢复. 301 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) { 302 mp := acquirem() 303 gp := mp.curg 304 status := readgstatus(gp) 305 if status != _Grunning && status != _Gscanrunning { 306 throw("gopark: bad g status") 307 } 308 mp.waitlock = lock 309 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf)) 310 gp.waitreason = reason 311 mp.waittraceev = traceEv 312 mp.waittraceskip = traceskip 313 releasem(mp) 314 // can't do anything that might move the G between Ms here. 315 mcall(park_m) 316 } 317 318 // Puts the current goroutine into a waiting state and unlocks the lock. 319 // The goroutine can be made runnable again by calling goready(gp). 320 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) { 321 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip) 322 } 323 324 func goready(gp *g, traceskip int) { 325 systemstack(func() { 326 ready(gp, traceskip, true) 327 }) 328 } 329 330 // 获得一个sudog, 这个sudog内的成员未初始化 331 //go:nosplit 332 func acquireSudog() *sudog { 333 // Delicate dance: the semaphore implementation calls 334 // acquireSudog, acquireSudog calls new(sudog), 335 // new calls malloc, malloc can call the garbage collector, 336 // and the garbage collector calls the semaphore implementation 337 // in stopTheWorld. 338 // Break the cycle by doing acquirem/releasem around new(sudog). 339 // The acquirem/releasem increments m.locks during new(sudog), 340 // which keeps the garbage collector from being invoked. 341 mp := acquirem() 342 pp := mp.p.ptr() 343 if len(pp.sudogcache) == 0 { 344 lock(&sched.sudoglock) 345 // First, try to grab a batch from central cache. 346 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil { 347 s := sched.sudogcache 348 sched.sudogcache = s.next 349 s.next = nil 350 pp.sudogcache = append(pp.sudogcache, s) 351 } 352 unlock(&sched.sudoglock) 353 // If the central cache is empty, allocate a new one. 
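// Annotator's note: sudogs represent a g waiting in some wait list, e.g. on a channel or a
// semaphore. A rough sketch of how the acquire/release pair is used, simplified from chansend
// in chan.go (error handling, select support and tracing omitted):
//
//	sg := acquireSudog()
//	sg.g = gp
//	sg.elem = unsafe.Pointer(&v)          // element being sent
//	c.sendq.enqueue(sg)                   // join the channel's wait queue
//	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)
//	// ...a receiver copies the element, clears sg.elem and calls goready(gp)...
//	releaseSudog(sg)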
354 if len(pp.sudogcache) == 0 { 355 pp.sudogcache = append(pp.sudogcache, new(sudog)) 356 } 357 } 358 n := len(pp.sudogcache) 359 s := pp.sudogcache[n-1] 360 pp.sudogcache[n-1] = nil 361 pp.sudogcache = pp.sudogcache[:n-1] 362 if s.elem != nil { 363 throw("acquireSudog: found s.elem != nil in cache") 364 } 365 releasem(mp) 366 return s 367 } 368 369 //go:nosplit 370 func releaseSudog(s *sudog) { 371 if s.elem != nil { 372 throw("runtime: sudog with non-nil elem") 373 } 374 if s.isSelect { 375 throw("runtime: sudog with non-false isSelect") 376 } 377 if s.next != nil { 378 throw("runtime: sudog with non-nil next") 379 } 380 if s.prev != nil { 381 throw("runtime: sudog with non-nil prev") 382 } 383 if s.waitlink != nil { 384 throw("runtime: sudog with non-nil waitlink") 385 } 386 if s.c != nil { 387 throw("runtime: sudog with non-nil c") 388 } 389 gp := getg() 390 if gp.param != nil { 391 throw("runtime: releaseSudog with non-nil gp.param") 392 } 393 mp := acquirem() // avoid rescheduling to another P 394 pp := mp.p.ptr() 395 if len(pp.sudogcache) == cap(pp.sudogcache) { 396 // Transfer half of local cache to the central cache. 397 var first, last *sudog 398 for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 399 n := len(pp.sudogcache) 400 p := pp.sudogcache[n-1] 401 pp.sudogcache[n-1] = nil 402 pp.sudogcache = pp.sudogcache[:n-1] 403 if first == nil { 404 first = p 405 } else { 406 last.next = p 407 } 408 last = p 409 } 410 lock(&sched.sudoglock) 411 last.next = sched.sudogcache 412 sched.sudogcache = first 413 unlock(&sched.sudoglock) 414 } 415 pp.sudogcache = append(pp.sudogcache, s) 416 releasem(mp) 417 } 418 419 // funcPC returns the entry PC of the function f. 420 // 返回函数的入口地址 421 // It assumes that f is a func value. Otherwise the behavior is undefined. 422 // 假设f是个func value(funcval). 否则行为是未定义的 423 // 实际上f是个eface, 结构第一个成员是个指针, 第二个成员是*funcval 424 // CAREFUL: In programs with plugins, funcPC can return different values 425 // for the same function (because there are actually multiple copies of 426 // the same function in the address space). To be safe, don't use the 427 // results of this function in any == expression. It is only safe to 428 // use the result as an address at which to start executing code. 
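// Annotator's note: funcPC (below) relies on the memory layout of an empty interface and of a
// func value in this runtime. A sketch of the same computation written with the runtime's own
// types (illustrative only, not an alternative implementation):
//
//	// interface{} is laid out as an eface: a type pointer followed by a data pointer.
//	// A func value is a pointer to a funcval, whose first word is the entry PC:
//	//	type funcval struct { fn uintptr }
//	// &f + PtrSize is therefore the address of the eface's data word, so:
//	pc := (*funcval)(efaceOf(&f).data).fn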
429 //go:nosplit 430 func funcPC(f interface{}) uintptr { 431 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize)) 432 } 433 434 // called from assembly 435 func badmcall(fn func(*g)) { 436 throw("runtime: mcall called on m->g0 stack") 437 } 438 439 func badmcall2(fn func(*g)) { 440 throw("runtime: mcall function returned") 441 } 442 443 func badreflectcall() { 444 panic(plainError("arg size to reflect.call more than 1GB")) 445 } 446 447 var badmorestackg0Msg = "fatal: morestack on g0\n" 448 449 //go:nosplit 450 //go:nowritebarrierrec 451 func badmorestackg0() { 452 sp := stringStructOf(&badmorestackg0Msg) 453 write(2, sp.str, int32(sp.len)) 454 } 455 456 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n" 457 458 //go:nosplit 459 //go:nowritebarrierrec 460 func badmorestackgsignal() { 461 sp := stringStructOf(&badmorestackgsignalMsg) 462 write(2, sp.str, int32(sp.len)) 463 } 464 465 //go:nosplit 466 func badctxt() { 467 throw("ctxt != 0") 468 } 469 470 func lockedOSThread() bool { 471 gp := getg() 472 return gp.lockedm != 0 && gp.m.lockedg != 0 473 } 474 475 var ( 476 allgs []*g 477 allglock mutex 478 ) 479 480 func allgadd(gp *g) { 481 if readgstatus(gp) == _Gidle { 482 throw("allgadd: bad status Gidle") 483 } 484 485 lock(&allglock) 486 allgs = append(allgs, gp) 487 allglen = uintptr(len(allgs)) 488 unlock(&allglock) 489 } 490 491 const ( 492 // Number of goroutine ids to grab from sched.goidgen to local per-P cache at once. 493 // 16 seems to provide enough amortization, but other than that it's mostly arbitrary number. 494 _GoidCacheBatch = 16 495 ) 496 497 // The bootstrap sequence is: 498 // 499 // 先前还会调check, args 500 // call osinit 501 // call schedinit 502 // make & queue new G 503 // call runtime·mstart 504 // 505 // The new G calls runtime·main. 506 func schedinit() { 507 // raceinit must be the first call to race detector. 508 // In particular, it must be done before mallocinit below calls racemapshadow. 509 _g_ := getg() 510 if raceenabled { 511 _g_.racectx, raceprocctx0 = raceinit() 512 } 513 514 sched.maxmcount = 10000 515 516 tracebackinit() 517 moduledataverify() 518 stackinit() 519 mallocinit() 520 mcommoninit(_g_.m) 521 alginit() // maps must not be used before this call, 因为这个函数会初始化hash算法和一些变量 522 modulesinit() // provides activeModules 523 typelinksinit() // uses maps, activeModules 524 itabsinit() // uses activeModules 525 526 msigsave(_g_.m) 527 initSigmask = _g_.m.sigmask 528 529 goargs() 530 goenvs() 531 parsedebugvars() 532 gcinit() 533 534 sched.lastpoll = uint64(nanotime()) 535 procs := ncpu 536 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 { 537 procs = n 538 } 539 if procresize(procs) != nil { 540 throw("unknown runnable goroutine during bootstrap") 541 } 542 543 // For cgocheck > 1, we turn on the write barrier at all times 544 // and check all pointer writes. We can't do this until after 545 // procresize because the write barrier needs a P. 546 if debug.cgocheck > 1 { 547 writeBarrier.cgo = true 548 writeBarrier.enabled = true 549 for _, p := range allp { 550 p.wbBuf.reset() 551 } 552 } 553 554 if buildVersion == "" { 555 // Condition should never trigger. This code just serves 556 // to ensure runtime·buildVersion is kept in the resulting binary. 
557 buildVersion = "unknown" 558 } 559 } 560 561 func dumpgstatus(gp *g) { 562 _g_ := getg() 563 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n") 564 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n") 565 } 566 567 func checkmcount() { 568 // sched lock is held 569 if mcount() > sched.maxmcount { 570 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n") 571 throw("thread exhaustion") 572 } 573 } 574 575 func mcommoninit(mp *m) { 576 _g_ := getg() 577 578 // g0 stack won't make sense for user (and is not necessary unwindable). 579 if _g_ != _g_.m.g0 { 580 callers(1, mp.createstack[:]) 581 } 582 583 lock(&sched.lock) 584 if sched.mnext+1 < sched.mnext { 585 throw("runtime: thread ID overflow") 586 } 587 mp.id = sched.mnext 588 sched.mnext++ 589 checkmcount() 590 591 mp.fastrand[0] = 1597334677 * uint32(mp.id) 592 mp.fastrand[1] = uint32(cputicks()) 593 if mp.fastrand[0]|mp.fastrand[1] == 0 { 594 mp.fastrand[1] = 1 595 } 596 597 mpreinit(mp) 598 if mp.gsignal != nil { 599 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard 600 } 601 602 // Add to allm so garbage collector doesn't free g->m 603 // when it is just in a register or thread-local storage. 604 mp.alllink = allm 605 606 // NumCgoCall() iterates over allm w/o schedlock, 607 // so we need to publish it safely. 608 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) 609 unlock(&sched.lock) 610 611 // Allocate memory to hold a cgo traceback if the cgo call crashes. 612 if iscgo || GOOS == "solaris" || GOOS == "windows" { 613 mp.cgoCallers = new(cgoCallers) 614 } 615 } 616 617 // Mark gp ready to run. 618 func ready(gp *g, traceskip int, next bool) { 619 if trace.enabled { 620 traceGoUnpark(gp, traceskip) 621 } 622 623 status := readgstatus(gp) 624 625 // Mark runnable. 626 _g_ := getg() 627 _g_.m.locks++ // disable preemption because it can be holding p in a local var 628 if status&^_Gscan != _Gwaiting { 629 dumpgstatus(gp) 630 throw("bad g->status in ready") 631 } 632 633 // status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 634 casgstatus(gp, _Gwaiting, _Grunnable) 635 runqput(_g_.m.p.ptr(), gp, next) 636 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 637 wakep() 638 } 639 _g_.m.locks-- 640 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in Case we've cleared it in newstack 641 _g_.stackguard0 = stackPreempt 642 } 643 } 644 645 func gcprocs() int32 { 646 // Figure out how many CPUs to use during GC. 647 // Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. 
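// Annotator's note: the clamping below computes, in effect,
//
//	n = min(gomaxprocs, ncpu, _MaxGcproc, sched.nmidle+1)
//
// i.e. never more GC helper threads than idle Ms plus the one that is already running.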
648 lock(&sched.lock) 649 n := gomaxprocs 650 if n > ncpu { 651 n = ncpu 652 } 653 if n > _MaxGcproc { 654 n = _MaxGcproc 655 } 656 if n > sched.nmidle+1 { // one M is currently running 657 n = sched.nmidle + 1 658 } 659 unlock(&sched.lock) 660 return n 661 } 662 663 func needaddgcproc() bool { 664 lock(&sched.lock) 665 n := gomaxprocs 666 if n > ncpu { 667 n = ncpu 668 } 669 if n > _MaxGcproc { 670 n = _MaxGcproc 671 } 672 n -= sched.nmidle + 1 // one M is currently running 673 unlock(&sched.lock) 674 return n > 0 675 } 676 677 func helpgc(nproc int32) { 678 _g_ := getg() 679 lock(&sched.lock) 680 pos := 0 681 for n := int32(1); n < nproc; n++ { // one M is currently running 682 if allp[pos].mcache == _g_.m.mcache { 683 pos++ 684 } 685 mp := mget() 686 if mp == nil { 687 throw("gcprocs inconsistency") 688 } 689 mp.helpgc = n 690 mp.p.set(allp[pos]) 691 mp.mcache = allp[pos].mcache 692 pos++ 693 notewakeup(&mp.park) 694 } 695 unlock(&sched.lock) 696 } 697 698 // freezeStopWait is a large value that freezetheworld sets 699 // sched.stopwait to in order to request that all Gs permanently stop. 700 const freezeStopWait = 0x7fffffff 701 702 // freezing is set to non-zero if the runtime is trying to freeze the 703 // world. 704 var freezing uint32 705 706 // Similar to stopTheWorld but best-effort and can be called several times. 707 // There is no reverse operation, used during crashing. 708 // This function must not lock any mutexes. 709 func freezetheworld() { 710 atomic.Store(&freezing, 1) 711 // stopwait and preemption requests can be lost 712 // due to races with concurrently executing threads, 713 // so try several times 714 for i := 0; i < 5; i++ { 715 // this should tell the scheduler to not start any new goroutines 716 sched.stopwait = freezeStopWait 717 atomic.Store(&sched.gcwaiting, 1) 718 // this should stop running goroutines 719 if !preemptall() { 720 break // no running goroutines 721 } 722 usleep(1000) 723 } 724 // to be sure 725 usleep(1000) 726 preemptall() 727 usleep(1000) 728 } 729 730 func isscanstatus(status uint32) bool { 731 if status == _Gscan { 732 throw("isscanstatus: Bad status Gscan") 733 } 734 return status&_Gscan == _Gscan 735 } 736 737 // All reads and writes of g's status go through readgstatus, casgstatus 738 // castogscanstatus, casfrom_Gscanstatus. 739 //go:nosplit 740 func readgstatus(gp *g) uint32 { 741 return atomic.Load(&gp.atomicstatus) 742 } 743 744 // Ownership of gcscanvalid: 745 // 746 // If gp is running (meaning status == _Grunning or _Grunning|_Gscan), 747 // then gp owns gp.gcscanvalid, and other goroutines must not modify it. 748 // 749 // Otherwise, a second goroutine can lock the scan state by setting _Gscan 750 // in the status bit and then modify gcscanvalid, and then unlock the scan state. 751 // 752 // Note that the first condition implies an exception to the second: 753 // if a second goroutine changes gp's status to _Grunning|_Gscan, 754 // that second goroutine still does not have the right to modify gcscanvalid. 755 756 // The Gscanstatuses are acting like locks and this releases them. 757 // If it proves to be a performance hit we should be able to make these 758 // simple atomic stores but for now we are going to throw if 759 // we see an inconsistent state. 760 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) { 761 success := false 762 763 // Check that transition is valid. 
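// Annotator's note: castogscanstatus (below) and casfrom_Gscanstatus act as an acquire/release
// pair on the _Gscan bit. A typical caller, simplified from scang later in this file:
//
//	if castogscanstatus(gp, _Gwaiting, _Gwaiting|_Gscan) { // "acquire": set the scan bit
//		scanstack(gp, gcw)                              // gp cannot start running while the bit is set
//		gp.gcscandone = true
//		restartg(gp)                                    // "release": casfrom_Gscanstatus back to _Gwaiting
//	}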
764 switch oldval { 765 default: 766 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 767 dumpgstatus(gp) 768 throw("casfrom_Gscanstatus:top gp->status is not in scan state") 769 case _Gscanrunnable, 770 _Gscanwaiting, 771 _Gscanrunning, 772 _Gscansyscall: 773 if newval == oldval&^_Gscan { 774 success = atomic.Cas(&gp.atomicstatus, oldval, newval) 775 } 776 } 777 if !success { 778 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n") 779 dumpgstatus(gp) 780 throw("casfrom_Gscanstatus: gp->status is not in scan state") 781 } 782 } 783 784 // This will return false if the gp is not in the expected status and the cas fails. 785 // This acts like a lock acquire while the casfromgstatus acts like a lock release. 786 func castogscanstatus(gp *g, oldval, newval uint32) bool { 787 switch oldval { 788 case _Grunnable, 789 _Grunning, 790 _Gwaiting, 791 _Gsyscall: 792 if newval == oldval|_Gscan { 793 return atomic.Cas(&gp.atomicstatus, oldval, newval) 794 } 795 } 796 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n") 797 throw("castogscanstatus") 798 panic("not reached") 799 } 800 801 // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus 802 // and casfrom_Gscanstatus instead. 803 // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 804 // put it in the Gscan state is finished. 805 //go:nosplit 806 func casgstatus(gp *g, oldval, newval uint32) { 807 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 808 systemstack(func() { 809 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 810 throw("casgstatus: bad incoming values") 811 }) 812 } 813 814 if oldval == _Grunning && gp.gcscanvalid { 815 // If oldvall == _Grunning, then the actual status must be 816 // _Grunning or _Grunning|_Gscan; either way, 817 // we own gp.gcscanvalid, so it's safe to read. 818 // gp.gcscanvalid must not be true when we are running. 819 systemstack(func() { 820 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n") 821 throw("casgstatus") 822 }) 823 } 824 825 // See http://golang.org/cl/21503 for justification of the yield delay. 826 const yieldDelay = 5 * 1000 827 var nextYield int64 828 829 // loop if gp->atomicstatus is in a scan state giving 830 // GC time to finish and change the state to oldval. 831 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ { 832 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable { 833 systemstack(func() { 834 throw("casgstatus: waiting for Gwaiting but is Grunnable") 835 }) 836 } 837 // Help GC if needed. 838 // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) { 839 // gp.preemptscan = false 840 // systemstack(func() { 841 // gcphasework(gp) 842 // }) 843 // } 844 // But meanwhile just yield. 845 if i == 0 { 846 nextYield = nanotime() + yieldDelay 847 } 848 if nanotime() < nextYield { 849 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ { 850 procyield(1) 851 } 852 } else { 853 osyield() 854 nextYield = nanotime() + yieldDelay/2 855 } 856 } 857 if newval == _Grunning { 858 gp.gcscanvalid = false 859 } 860 } 861 862 // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable. 863 // Returns old status. 
Cannot call casgstatus directly, because we are racing with an 864 // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus, 865 // it might have become Grunnable by the time we get to the cas. If we called casgstatus, 866 // it would loop waiting for the status to go back to Gwaiting, which it never will. 867 //go:nosplit 868 func casgcopystack(gp *g) uint32 { 869 for { 870 oldstatus := readgstatus(gp) &^ _Gscan 871 if oldstatus != _Gwaiting && oldstatus != _Grunnable { 872 throw("copystack: bad status, not Gwaiting or Grunnable") 873 } 874 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) { 875 return oldstatus 876 } 877 } 878 } 879 880 // scang blocks until gp's stack has been scanned. 881 // It might be scanned by scang or it might be scanned by the goroutine itself. 882 // Either way, the stack scan has completed when scang returns. 883 func scang(gp *g, gcw *gcWork) { 884 // Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone. 885 // Nothing is racing with us now, but gcscandone might be set to true left over 886 // from an earlier round of stack scanning (we scan twice per GC). 887 // We use gcscandone to record whether the scan has been done during this round. 888 889 gp.gcscandone = false 890 891 // See http://golang.org/cl/21503 for justification of the yield delay. 892 const yieldDelay = 10 * 1000 893 var nextYield int64 894 895 // Endeavor to get gcscandone set to true, 896 // either by doing the stack scan ourselves or by coercing gp to scan itself. 897 // gp.gcscandone can transition from false to true when we're not looking 898 // (if we asked for preemption), so any time we lock the status using 899 // castogscanstatus we have to double-check that the scan is still not done. 900 loop: 901 for i := 0; !gp.gcscandone; i++ { 902 switch s := readgstatus(gp); s { 903 default: 904 dumpgstatus(gp) 905 throw("stopg: invalid status") 906 907 case _Gdead: 908 // No stack. 909 gp.gcscandone = true 910 break loop 911 912 case _Gcopystack: 913 // Stack being switched. Go around again. 914 915 case _Grunnable, _Gsyscall, _Gwaiting: 916 // Claim goroutine by setting scan bit. 917 // Racing with execution or readying of gp. 918 // The scan bit keeps them from running 919 // the goroutine until we're done. 920 if castogscanstatus(gp, s, s|_Gscan) { 921 if !gp.gcscandone { 922 scanstack(gp, gcw) 923 gp.gcscandone = true 924 } 925 restartg(gp) 926 break loop 927 } 928 929 case _Gscanwaiting: 930 // newstack is doing a scan for us right now. Wait. 931 932 case _Grunning: 933 // Goroutine running. Try to preempt execution so it can scan itself. 934 // The preemption handler (in newstack) does the actual scan. 935 936 // Optimization: if there is already a pending preemption request 937 // (from the previous loop iteration), don't bother with the atomics. 938 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt { 939 break 940 } 941 942 // Ask for preemption and self scan. 
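// Annotator's note: the "request" below works through the stack-growth check. stackPreempt is a
// sentinel stack-guard value larger than any real stack pointer, so once gp.stackguard0 is set
// to it, the next function prologue gp executes fails its check, which is roughly
//
//	if SP < g.stackguard0 { call morestack }
//
// and ends up in newstack, which sees preempt/preemptscan and scans or yields on gp's behalf.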
943 if castogscanstatus(gp, _Grunning, _Gscanrunning) { 944 if !gp.gcscandone { 945 gp.preemptscan = true 946 gp.preempt = true 947 gp.stackguard0 = stackPreempt 948 } 949 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning) 950 } 951 } 952 953 if i == 0 { 954 nextYield = nanotime() + yieldDelay 955 } 956 if nanotime() < nextYield { 957 procyield(10) 958 } else { 959 osyield() 960 nextYield = nanotime() + yieldDelay/2 961 } 962 } 963 964 gp.preemptscan = false // cancel scan request if no longer needed 965 } 966 967 // The GC requests that this routine be moved from a scanmumble state to a mumble state. 968 func restartg(gp *g) { 969 s := readgstatus(gp) 970 switch s { 971 default: 972 dumpgstatus(gp) 973 throw("restartg: unexpected status") 974 975 case _Gdead: 976 // ok 977 978 case _Gscanrunnable, 979 _Gscanwaiting, 980 _Gscansyscall: 981 casfrom_Gscanstatus(gp, s, s&^_Gscan) 982 } 983 } 984 985 // stopTheWorld stops all P's from executing goroutines, interrupting 986 // all goroutines at GC safe points and records reason as the reason 987 // for the stop. On return, only the current goroutine's P is running. 988 // stopTheWorld must not be called from a system stack and the caller 989 // must not hold worldsema. The caller must call startTheWorld when 990 // other P's should resume execution. 991 // 992 // stopTheWorld is safe for multiple goroutines to call at the 993 // same time. Each will execute its own stop, and the stops will 994 // be serialized. 995 // 996 // This is also used by routines that do stack dumps. If the system is 997 // in panic or being exited, this may not reliably stop all 998 // goroutines. 999 func stopTheWorld(reason string) { 1000 semacquire(&worldsema) 1001 getg().m.preemptoff = reason 1002 systemstack(stopTheWorldWithSema) 1003 } 1004 1005 // startTheWorld undoes the effects of stopTheWorld. 1006 func startTheWorld() { 1007 systemstack(func() { startTheWorldWithSema(false) }) 1008 // worldsema must be held over startTheWorldWithSema to ensure 1009 // gomaxprocs cannot change while worldsema is held. 1010 semrelease(&worldsema) 1011 getg().m.preemptoff = "" 1012 } 1013 1014 // Holding worldsema grants an M the right to try to stop the world 1015 // and prevents gomaxprocs from changing concurrently. 1016 var worldsema uint32 = 1 1017 1018 // stopTheWorldWithSema is the core implementation of stopTheWorld. 1019 // The caller is responsible for acquiring worldsema and disabling 1020 // preemption first and then should stopTheWorldWithSema on the system 1021 // stack: 1022 // 1023 // semacquire(&worldsema, 0) 1024 // m.preemptoff = "reason" 1025 // systemstack(stopTheWorldWithSema) 1026 // 1027 // When finished, the caller must either call startTheWorld or undo 1028 // these three operations separately: 1029 // 1030 // m.preemptoff = "" 1031 // systemstack(startTheWorldWithSema) 1032 // semrelease(&worldsema) 1033 // 1034 // It is allowed to acquire worldsema once and then execute multiple 1035 // startTheWorldWithSema/stopTheWorldWithSema pairs. 1036 // Other P's are able to execute between successive calls to 1037 // startTheWorldWithSema and stopTheWorldWithSema. 1038 // Holding worldsema causes any other goroutines invoking 1039 // stopTheWorld to block. 1040 func stopTheWorldWithSema() { 1041 _g_ := getg() 1042 1043 // If we hold a lock, then we won't be able to stop another M 1044 // that is blocked trying to acquire the lock. 
1045 if _g_.m.locks > 0 { 1046 throw("stopTheWorld: holding locks") 1047 } 1048 1049 lock(&sched.lock) 1050 sched.stopwait = gomaxprocs 1051 atomic.Store(&sched.gcwaiting, 1) 1052 preemptall() 1053 // stop current P 1054 _g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic. 1055 sched.stopwait-- 1056 // try to retake all P's in Psyscall status 1057 for _, p := range allp { 1058 s := p.status 1059 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) { 1060 if trace.enabled { 1061 traceGoSysBlock(p) 1062 traceProcStop(p) 1063 } 1064 p.syscalltick++ 1065 sched.stopwait-- 1066 } 1067 } 1068 // stop idle P's 1069 for { 1070 p := pidleget() 1071 if p == nil { 1072 break 1073 } 1074 p.status = _Pgcstop 1075 sched.stopwait-- 1076 } 1077 wait := sched.stopwait > 0 1078 unlock(&sched.lock) 1079 1080 // wait for remaining P's to stop voluntarily 1081 if wait { 1082 for { 1083 // wait for 100us, then try to re-preempt in case of any races 1084 if notetsleep(&sched.stopnote, 100*1000) { 1085 noteclear(&sched.stopnote) 1086 break 1087 } 1088 preemptall() 1089 } 1090 } 1091 1092 // sanity checks 1093 bad := "" 1094 if sched.stopwait != 0 { 1095 bad = "stopTheWorld: not stopped (stopwait != 0)" 1096 } else { 1097 for _, p := range allp { 1098 if p.status != _Pgcstop { 1099 bad = "stopTheWorld: not stopped (status != _Pgcstop)" 1100 } 1101 } 1102 } 1103 if atomic.Load(&freezing) != 0 { 1104 // Some other thread is panicking. This can cause the 1105 // sanity checks above to fail if the panic happens in 1106 // the signal handler on a stopped thread. Either way, 1107 // we should halt this thread. 1108 lock(&deadlock) 1109 lock(&deadlock) 1110 } 1111 if bad != "" { 1112 throw(bad) 1113 } 1114 } 1115 1116 func mhelpgc() { 1117 _g_ := getg() 1118 _g_.m.helpgc = -1 1119 } 1120 1121 func startTheWorldWithSema(emitTraceEvent bool) int64 { 1122 _g_ := getg() 1123 1124 _g_.m.locks++ // disable preemption because it can be holding p in a local var 1125 if netpollinited() { 1126 gp := netpoll(false) // non-blocking 1127 injectglist(gp) 1128 } 1129 add := needaddgcproc() 1130 lock(&sched.lock) 1131 1132 procs := gomaxprocs 1133 if newprocs != 0 { 1134 procs = newprocs 1135 newprocs = 0 1136 } 1137 p1 := procresize(procs) 1138 sched.gcwaiting = 0 1139 if sched.sysmonwait != 0 { 1140 sched.sysmonwait = 0 1141 notewakeup(&sched.sysmonnote) 1142 } 1143 unlock(&sched.lock) 1144 1145 for p1 != nil { 1146 p := p1 1147 p1 = p1.link.ptr() 1148 if p.m != 0 { 1149 mp := p.m.ptr() 1150 p.m = 0 1151 if mp.nextp != 0 { 1152 throw("startTheWorld: inconsistent mp->nextp") 1153 } 1154 mp.nextp.set(p) 1155 notewakeup(&mp.park) 1156 } else { 1157 // Start M to run P. Do not start another M below. 1158 newm(nil, p) 1159 add = false 1160 } 1161 } 1162 1163 // Capture start-the-world time before doing clean-up tasks. 1164 startTime := nanotime() 1165 if emitTraceEvent { 1166 traceGCSTWDone() 1167 } 1168 1169 // Wakeup an additional proc in case we have excessive runnable goroutines 1170 // in local queues or in the global queue. If we don't, the proc will park itself. 1171 // If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 1172 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 1173 wakep() 1174 } 1175 1176 if add { 1177 // If GC could have used another helper proc, start one now, 1178 // in the hope that it will be available next time. 
1179 // It would have been even better to start it before the collection, 1180 // but doing so requires allocating memory, so it's tricky to 1181 // coordinate. This lazy approach works out in practice: 1182 // we don't mind if the first couple gc rounds don't have quite 1183 // the maximum number of procs. 1184 newm(mhelpgc, nil) 1185 } 1186 _g_.m.locks-- 1187 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1188 _g_.stackguard0 = stackPreempt 1189 } 1190 1191 return startTime 1192 } 1193 1194 // Called to start an M. 1195 // 1196 // This must not split the stack because we may not even have stack 1197 // bounds set up yet. 1198 // 1199 // May run during STW (because it doesn't have a P yet), so write 1200 // barriers are not allowed. 1201 // 1202 //go:nosplit 1203 //go:nowritebarrierrec 1204 func mstart() { 1205 _g_ := getg() // 这里应该会拿到g0 1206 1207 osStack := _g_.stack.lo == 0 1208 if osStack { 1209 // Initialize stack bounds from system stack. 1210 // Cgo may have left stack size in stack.hi. 1211 size := _g_.stack.hi 1212 if size == 0 { 1213 size = 8192 * sys.StackGuardMultiplier 1214 } 1215 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size))) 1216 _g_.stack.lo = _g_.stack.hi - size + 1024 1217 } 1218 // Initialize stack guards so that we can start calling 1219 // both Go and C functions with stack growth prologues. 1220 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1221 _g_.stackguard1 = _g_.stackguard0 1222 mstart1(0) 1223 1224 // Exit this thread. 1225 if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" { 1226 // Window, Solaris and Plan 9 always system-allocate 1227 // the stack, but put it in _g_.stack before mstart, 1228 // so the logic above hasn't set osStack yet. 1229 osStack = true 1230 } 1231 mexit(osStack) 1232 } 1233 1234 func mstart1(dummy int32) { 1235 _g_ := getg() 1236 1237 if _g_ != _g_.m.g0 { 1238 throw("bad runtime·mstart") 1239 } 1240 1241 // Record the caller for use as the top of stack in mcall and 1242 // for terminating the thread. 1243 // We're never coming back to mstart1 after we call schedule, 1244 // so other calls can reuse the current frame. 1245 save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy))) 1246 asminit() 1247 minit() 1248 1249 // Install signal handlers; after minit so that minit can 1250 // prepare the thread to be able to handle the signals. 1251 if _g_.m == &m0 { 1252 mstartm0() 1253 } 1254 1255 if fn := _g_.m.mstartfn; fn != nil { 1256 fn() 1257 } 1258 1259 if _g_.m.helpgc != 0 { 1260 _g_.m.helpgc = 0 1261 stopm() 1262 } else if _g_.m != &m0 { 1263 acquirep(_g_.m.nextp.ptr()) 1264 _g_.m.nextp = 0 1265 } 1266 schedule() 1267 } 1268 1269 // mstartm0 implements part of mstart1 that only runs on the m0. 1270 // 1271 // Write barriers are allowed here because we know the GC can't be 1272 // running yet, so they'll be no-ops. 1273 // 1274 //go:yeswritebarrierrec 1275 func mstartm0() { 1276 // Create an extra M for callbacks on threads not created by Go. 1277 if iscgo && !cgoHasExtraM { 1278 cgoHasExtraM = true 1279 newextram() 1280 } 1281 initsig(false) 1282 } 1283 1284 // mexit tears down and exits the current thread. 1285 // 1286 // Don't call this directly to exit the thread, since it must run at 1287 // the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to 1288 // unwind the stack to the point that exits the thread. 1289 // 1290 // It is entered with m.p != nil, so write barriers are allowed. It 1291 // will release the P before exiting. 
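// Annotator's summary of the thread lifecycle implemented by the functions above and below
// (a rough sketch, not additional API):
//
//	newm(fn, p)  -> allocm -> newosproc (or _cgo_thread_start)   // create the OS thread
//	thread entry: mstart -> mstart1 -> schedule()                 // schedule never returns
//	teardown:     gogo(&m.g0.sched) unwinds back into mstart,
//	              which then calls mexit to drop the M from allm and exit the thread.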
1292 // 1293 //go:yeswritebarrierrec 1294 func mexit(osStack bool) { 1295 g := getg() 1296 m := g.m 1297 1298 if m == &m0 { 1299 // This is the main thread. Just wedge it. 1300 // 1301 // On Linux, exiting the main thread puts the process 1302 // into a non-waitable zombie state. On Plan 9, 1303 // exiting the main thread unblocks wait even though 1304 // other threads are still running. On Solaris we can 1305 // neither exitThread nor return from mstart. Other 1306 // bad things probably happen on other platforms. 1307 // 1308 // We could try to clean up this M more before wedging 1309 // it, but that complicates signal handling. 1310 handoffp(releasep()) 1311 lock(&sched.lock) 1312 sched.nmfreed++ 1313 checkdead() 1314 unlock(&sched.lock) 1315 notesleep(&m.park) 1316 throw("locked m0 woke up") 1317 } 1318 1319 sigblock() 1320 unminit() 1321 1322 // Free the gsignal stack. 1323 if m.gsignal != nil { 1324 stackfree(m.gsignal.stack) 1325 } 1326 1327 // Remove m from allm. 1328 lock(&sched.lock) 1329 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink { 1330 if *pprev == m { 1331 *pprev = m.alllink 1332 goto found 1333 } 1334 } 1335 throw("m not found in allm") 1336 found: 1337 if !osStack { 1338 // Delay reaping m until it's done with the stack. 1339 // 1340 // If this is using an OS stack, the OS will free it 1341 // so there's no need for reaping. 1342 atomic.Store(&m.freeWait, 1) 1343 // Put m on the free list, though it will not be reaped until 1344 // freeWait is 0. Note that the free list must not be linked 1345 // through alllink because some functions walk allm without 1346 // locking, so may be using alllink. 1347 m.freelink = sched.freem 1348 sched.freem = m 1349 } 1350 unlock(&sched.lock) 1351 1352 // Release the P. 1353 handoffp(releasep()) 1354 // After this point we must not have write barriers. 1355 1356 // Invoke the deadlock detector. This must happen after 1357 // handoffp because it may have started a new M to take our 1358 // P's work. 1359 lock(&sched.lock) 1360 sched.nmfreed++ 1361 checkdead() 1362 unlock(&sched.lock) 1363 1364 if osStack { 1365 // Return from mstart and let the system thread 1366 // library free the g0 stack and terminate the thread. 1367 return 1368 } 1369 1370 // mstart is the thread's entry point, so there's nothing to 1371 // return to. Exit the thread directly. exitThread will clear 1372 // m.freeWait when it's done with the stack and the m can be 1373 // reaped. 1374 exitThread(&m.freeWait) 1375 } 1376 1377 // forEachP calls fn(p) for every P p when p reaches a GC safe point. 1378 // If a P is currently executing code, this will bring the P to a GC 1379 // safe point and execute fn on that P. If the P is not executing code 1380 // (it is idle or in a syscall), this will call fn(p) directly while 1381 // preventing the P from exiting its state. This does not ensure that 1382 // fn will run on every CPU executing Go code, but it acts as a global 1383 // memory barrier. GC uses this as a "ragged barrier." 1384 // 1385 // The caller must hold worldsema. 1386 // 1387 //go:systemstack 1388 func forEachP(fn func(*p)) { 1389 mp := acquirem() 1390 _p_ := getg().m.p.ptr() 1391 1392 lock(&sched.lock) 1393 if sched.safePointWait != 0 { 1394 throw("forEachP: sched.safePointWait != 0") 1395 } 1396 sched.safePointWait = gomaxprocs - 1 1397 sched.safePointFn = fn 1398 1399 // Ask all Ps to run the safe point function. 
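// Annotator's note: callers invoke forEachP from the system stack to run a small function
// against every P without a full stop-the-world, schematically (the real call sites are in the
// garbage collector):
//
//	systemstack(func() {
//		forEachP(func(pp *p) {
//			// flush or inspect pp's local state at a safe point
//		})
//	})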
1400 for _, p := range allp { 1401 if p != _p_ { 1402 atomic.Store(&p.runSafePointFn, 1) 1403 } 1404 } 1405 preemptall() 1406 1407 // Any P entering _Pidle or _Psyscall from now on will observe 1408 // p.runSafePointFn == 1 and will call runSafePointFn when 1409 // changing its status to _Pidle/_Psyscall. 1410 1411 // Run safe point function for all idle Ps. sched.pidle will 1412 // not change because we hold sched.lock. 1413 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() { 1414 if atomic.Cas(&p.runSafePointFn, 1, 0) { 1415 fn(p) 1416 sched.safePointWait-- 1417 } 1418 } 1419 1420 wait := sched.safePointWait > 0 1421 unlock(&sched.lock) 1422 1423 // Run fn for the current P. 1424 fn(_p_) 1425 1426 // Force Ps currently in _Psyscall into _Pidle and hand them 1427 // off to induce safe point function execution. 1428 for _, p := range allp { 1429 s := p.status 1430 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) { 1431 if trace.enabled { 1432 traceGoSysBlock(p) 1433 traceProcStop(p) 1434 } 1435 p.syscalltick++ 1436 handoffp(p) 1437 } 1438 } 1439 1440 // Wait for remaining Ps to run fn. 1441 if wait { 1442 for { 1443 // Wait for 100us, then try to re-preempt in 1444 // case of any races. 1445 // 1446 // Requires system stack. 1447 if notetsleep(&sched.safePointNote, 100*1000) { 1448 noteclear(&sched.safePointNote) 1449 break 1450 } 1451 preemptall() 1452 } 1453 } 1454 if sched.safePointWait != 0 { 1455 throw("forEachP: not done") 1456 } 1457 for _, p := range allp { 1458 if p.runSafePointFn != 0 { 1459 throw("forEachP: P did not run fn") 1460 } 1461 } 1462 1463 lock(&sched.lock) 1464 sched.safePointFn = nil 1465 unlock(&sched.lock) 1466 releasem(mp) 1467 } 1468 1469 // runSafePointFn runs the safe point function, if any, for this P. 1470 // This should be called like 1471 // 1472 // if getg().m.p.runSafePointFn != 0 { 1473 // runSafePointFn() 1474 // } 1475 // 1476 // runSafePointFn must be checked on any transition in to _Pidle or 1477 // _Psyscall to avoid a race where forEachP sees that the P is running 1478 // just before the P goes into _Pidle/_Psyscall and neither forEachP 1479 // nor the P run the safe-point function. 1480 func runSafePointFn() { 1481 p := getg().m.p.ptr() 1482 // Resolve the race between forEachP running the safe-point 1483 // function on this P's behalf and this P running the 1484 // safe-point function directly. 1485 if !atomic.Cas(&p.runSafePointFn, 1, 0) { 1486 return 1487 } 1488 sched.safePointFn(p) 1489 lock(&sched.lock) 1490 sched.safePointWait-- 1491 if sched.safePointWait == 0 { 1492 notewakeup(&sched.safePointNote) 1493 } 1494 unlock(&sched.lock) 1495 } 1496 1497 // When running with cgo, we call _cgo_thread_start 1498 // to start threads for us so that we can play nicely with 1499 // foreign code. 1500 var cgoThreadStart unsafe.Pointer 1501 1502 type cgothreadstart struct { 1503 g guintptr 1504 tls *uint64 1505 fn unsafe.Pointer 1506 } 1507 1508 // Allocate a new m unassociated with any thread. 1509 // Can use p for allocation context if needed. 1510 // fn is recorded as the new m's m.mstartfn. 1511 // 1512 // This function is allowed to have write barriers even if the caller 1513 // isn't because it borrows _p_. 1514 // 1515 //go:yeswritebarrierrec 1516 func allocm(_p_ *p, fn func()) *m { 1517 _g_ := getg() 1518 _g_.m.locks++ // disable GC because it can be called from sysmon 1519 if _g_.m.p == 0 { 1520 acquirep(_p_) // temporarily borrow p for mallocs in this function 1521 } 1522 1523 // Release the free M list. 
We need to do this somewhere and 1524 // this may free up a stack we can use. 1525 if sched.freem != nil { 1526 lock(&sched.lock) 1527 var newList *m 1528 for freem := sched.freem; freem != nil; { 1529 if freem.freeWait != 0 { 1530 next := freem.freelink 1531 freem.freelink = newList 1532 newList = freem 1533 freem = next 1534 continue 1535 } 1536 stackfree(freem.g0.stack) 1537 freem = freem.freelink 1538 } 1539 sched.freem = newList 1540 unlock(&sched.lock) 1541 } 1542 1543 mp := new(m) 1544 mp.mstartfn = fn 1545 mcommoninit(mp) 1546 1547 // In case of cgo or Solaris, pthread_create will make us a stack. 1548 // Windows and Plan 9 will layout sched stack on OS stack. 1549 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" { 1550 mp.g0 = malg(-1) 1551 } else { 1552 mp.g0 = malg(8192 * sys.StackGuardMultiplier) 1553 } 1554 mp.g0.m = mp 1555 1556 if _p_ == _g_.m.p.ptr() { 1557 releasep() 1558 } 1559 _g_.m.locks-- 1560 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 1561 _g_.stackguard0 = stackPreempt 1562 } 1563 1564 return mp 1565 } 1566 1567 // needm is called when a cgo callback happens on a 1568 // thread without an m (a thread not created by Go). 1569 // In this case, needm is expected to find an m to use 1570 // and return with m, g initialized correctly. 1571 // Since m and g are not set now (likely nil, but see below) 1572 // needm is limited in what routines it can call. In particular 1573 // it can only call nosplit functions (textflag 7) and cannot 1574 // do any scheduling that requires an m. 1575 // 1576 // In order to avoid needing heavy lifting here, we adopt 1577 // the following strategy: there is a stack of available m's 1578 // that can be stolen. Using compare-and-swap 1579 // to pop from the stack has ABA races, so we simulate 1580 // a lock by doing an exchange (via casp) to steal the stack 1581 // head and replace the top pointer with MLOCKED (1). 1582 // This serves as a simple spin lock that we can use even 1583 // without an m. The thread that locks the stack in this way 1584 // unlocks the stack by storing a valid stack head pointer. 1585 // 1586 // In order to make sure that there is always an m structure 1587 // available to be stolen, we maintain the invariant that there 1588 // is always one more than needed. At the beginning of the 1589 // program (if cgo is in use) the list is seeded with a single m. 1590 // If needm finds that it has taken the last m off the list, its job 1591 // is - once it has installed its own m so that it can do things like 1592 // allocate memory - to create a spare m and put it on the list. 1593 // 1594 // Each of these extra m's also has a g0 and a curg that are 1595 // pressed into service as the scheduling stack and current 1596 // goroutine for the duration of the cgo callback. 1597 // 1598 // When the callback is done with the m, it calls dropm to 1599 // put the m back on the list. 1600 //go:nosplit 1601 func needm(x byte) { 1602 if iscgo && !cgoHasExtraM { 1603 // Can happen if C/C++ code calls Go from a global ctor. 1604 // Can not throw, because scheduler is not initialized yet. 1605 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback))) 1606 exit(1) 1607 } 1608 1609 // Lock extra list, take head, unlock popped list. 1610 // nilokay=false is safe here because of the invariant above, 1611 // that the extra list always contains or will soon contain 1612 // at least one m. 
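// Annotator's sketch of the path that reaches needm (simplified; the assembly glue is omitted):
//
//	C-created thread calls an exported Go function
//	  -> crosscall2 -> cgocallback -> needm()   // borrow an extra M and install its g0
//	  -> cgocallbackg(...)                      // run the Go callback on m.curg
//	  -> dropm()                                // return the M to the extra list
//
// so each callback from a non-Go thread temporarily adopts one of the Ms created by oneNewExtraM.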
1613 mp := lockextra(false) 1614 1615 // Set needextram when we've just emptied the list, 1616 // so that the eventual call into cgocallbackg will 1617 // allocate a new m for the extra list. We delay the 1618 // allocation until then so that it can be done 1619 // after exitsyscall makes sure it is okay to be 1620 // running at all (that is, there's no garbage collection 1621 // running right now). 1622 mp.needextram = mp.schedlink == 0 1623 extraMCount-- 1624 unlockextra(mp.schedlink.ptr()) 1625 1626 // Save and block signals before installing g. 1627 // Once g is installed, any incoming signals will try to execute, 1628 // but we won't have the sigaltstack settings and other data 1629 // set up appropriately until the end of minit, which will 1630 // unblock the signals. This is the same dance as when 1631 // starting a new m to run Go code via newosproc. 1632 msigsave(mp) 1633 sigblock() 1634 1635 // Install g (= m->g0) and set the stack bounds 1636 // to match the current stack. We don't actually know 1637 // how big the stack is, like we don't know how big any 1638 // scheduling stack is, but we assume there's at least 32 kB, 1639 // which is more than enough for us. 1640 setg(mp.g0) 1641 _g_ := getg() 1642 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024 1643 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024 1644 _g_.stackguard0 = _g_.stack.lo + _StackGuard 1645 1646 // Initialize this thread to use the m. 1647 asminit() 1648 minit() 1649 1650 // mp.curg is now a real goroutine. 1651 casgstatus(mp.curg, _Gdead, _Gsyscall) 1652 atomic.Xadd(&sched.ngsys, -1) 1653 } 1654 1655 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n") 1656 1657 // newextram allocates m's and puts them on the extra list. 1658 // It is called with a working local m, so that it can do things 1659 // like call schedlock and allocate. 1660 func newextram() { 1661 c := atomic.Xchg(&extraMWaiters, 0) 1662 if c > 0 { 1663 for i := uint32(0); i < c; i++ { 1664 oneNewExtraM() 1665 } 1666 } else { 1667 // Make sure there is at least one extra M. 1668 mp := lockextra(true) 1669 unlockextra(mp) 1670 if mp == nil { 1671 oneNewExtraM() 1672 } 1673 } 1674 } 1675 1676 // oneNewExtraM allocates an m and puts it on the extra list. 1677 func oneNewExtraM() { 1678 // Create extra goroutine locked to extra m. 1679 // The goroutine is the context in which the cgo callback will run. 1680 // The sched.pc will never be returned to, but setting it to 1681 // goexit makes clear to the traceback routines where 1682 // the goroutine stack ends. 1683 mp := allocm(nil, nil) 1684 gp := malg(4096) 1685 gp.sched.pc = funcPC(goexit) + sys.PCQuantum 1686 gp.sched.sp = gp.stack.hi 1687 gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame 1688 gp.sched.lr = 0 1689 gp.sched.g = guintptr(unsafe.Pointer(gp)) 1690 gp.syscallpc = gp.sched.pc 1691 gp.syscallsp = gp.sched.sp 1692 gp.stktopsp = gp.sched.sp 1693 gp.gcscanvalid = true 1694 gp.gcscandone = true 1695 // malg returns status as _Gidle. Change to _Gdead before 1696 // adding to allg where GC can see it. We use _Gdead to hide 1697 // this from tracebacks and stack scans since it isn't a 1698 // "real" goroutine until needm grabs it. 
1699 casgstatus(gp, _Gidle, _Gdead) 1700 gp.m = mp 1701 mp.curg = gp 1702 mp.lockedInt++ 1703 mp.lockedg.set(gp) 1704 gp.lockedm.set(mp) 1705 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1)) 1706 if raceenabled { 1707 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum) 1708 } 1709 // put on allg for garbage collector 1710 allgadd(gp) 1711 1712 // gp is now on the allg list, but we don't want it to be 1713 // counted by gcount. It would be more "proper" to increment 1714 // sched.ngfree, but that requires locking. Incrementing ngsys 1715 // has the same effect. 1716 atomic.Xadd(&sched.ngsys, +1) 1717 1718 // Add m to the extra list. 1719 mnext := lockextra(true) 1720 mp.schedlink.set(mnext) 1721 extraMCount++ 1722 unlockextra(mp) 1723 } 1724 1725 // dropm is called when a cgo callback has called needm but is now 1726 // done with the callback and returning back into the non-Go thread. 1727 // It puts the current m back onto the extra list. 1728 // 1729 // The main expense here is the call to signalstack to release the 1730 // m's signal stack, and then the call to needm on the next callback 1731 // from this thread. It is tempting to try to save the m for next time, 1732 // which would eliminate both these costs, but there might not be 1733 // a next time: the current thread (which Go does not control) might exit. 1734 // If we saved the m for that thread, there would be an m leak each time 1735 // such a thread exited. Instead, we acquire and release an m on each 1736 // call. These should typically not be scheduling operations, just a few 1737 // atomics, so the cost should be small. 1738 // 1739 // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread 1740 // variable using pthread_key_create. Unlike the pthread keys we already use 1741 // on OS X, this dummy key would never be read by Go code. It would exist 1742 // only so that we could register at thread-exit-time destructor. 1743 // That destructor would put the m back onto the extra list. 1744 // This is purely a performance optimization. The current version, 1745 // in which dropm happens on each cgo call, is still correct too. 1746 // We may have to keep the current version on systems with cgo 1747 // but without pthreads, like Windows. 1748 func dropm() { 1749 // Clear m and g, and return m to the extra list. 1750 // After the call to setg we can only call nosplit functions 1751 // with no pointer manipulation. 1752 mp := getg().m 1753 1754 // Return mp.curg to dead state. 1755 casgstatus(mp.curg, _Gsyscall, _Gdead) 1756 atomic.Xadd(&sched.ngsys, +1) 1757 1758 // Block signals before unminit. 1759 // Unminit unregisters the signal handling stack (but needs g on some systems). 1760 // Setg(nil) clears g, which is the signal handler's cue not to run Go handlers. 1761 // It's important not to try to handle a signal between those two steps. 1762 sigmask := mp.sigmask 1763 sigblock() 1764 unminit() 1765 1766 mnext := lockextra(true) 1767 extraMCount++ 1768 mp.schedlink.set(mnext) 1769 1770 setg(nil) 1771 1772 // Commit the release of mp. 1773 unlockextra(mp) 1774 1775 msigrestore(sigmask) 1776 } 1777 1778 // A helper function for EnsureDropM. 1779 func getm() uintptr { 1780 return uintptr(unsafe.Pointer(getg().m)) 1781 } 1782 1783 var extram uintptr 1784 var extraMCount uint32 // Protected by lockextra 1785 var extraMWaiters uint32 1786 1787 // lockextra locks the extra list and returns the list head. 1788 // The caller must unlock the list by storing a new list head 1789 // to extram. 
If nilokay is true, then lockextra will 1790 // return a nil list head if that's what it finds. If nilokay is false, 1791 // lockextra will keep waiting until the list head is no longer nil. 1792 //go:nosplit 1793 func lockextra(nilokay bool) *m { 1794 const locked = 1 1795 1796 incr := false 1797 for { 1798 old := atomic.Loaduintptr(&extram) 1799 if old == locked { 1800 yield := osyield 1801 yield() 1802 continue 1803 } 1804 if old == 0 && !nilokay { 1805 if !incr { 1806 // Add 1 to the number of threads 1807 // waiting for an M. 1808 // This is cleared by newextram. 1809 atomic.Xadd(&extraMWaiters, 1) 1810 incr = true 1811 } 1812 usleep(1) 1813 continue 1814 } 1815 if atomic.Casuintptr(&extram, old, locked) { 1816 return (*m)(unsafe.Pointer(old)) 1817 } 1818 yield := osyield 1819 yield() 1820 continue 1821 } 1822 } 1823 1824 //go:nosplit 1825 func unlockextra(mp *m) { 1826 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp))) 1827 } 1828 1829 // execLock serializes exec and clone to avoid bugs or unspecified behaviour 1830 // around exec'ing while creating/destroying threads. See issue #19546. 1831 var execLock rwmutex 1832 1833 // newmHandoff contains a list of m structures that need new OS threads. 1834 // This is used by newm in situations where newm itself can't safely 1835 // start an OS thread. 1836 var newmHandoff struct { 1837 lock mutex 1838 1839 // newm points to a list of M structures that need new OS 1840 // threads. The list is linked through m.schedlink. 1841 newm muintptr 1842 1843 // waiting indicates that wake needs to be notified when an m 1844 // is put on the list. 1845 waiting bool 1846 wake note 1847 1848 // haveTemplateThread indicates that the templateThread has 1849 // been started. This is not protected by lock. Use cas to set 1850 // to 1. 1851 haveTemplateThread uint32 1852 } 1853 1854 // Create a new m. It will start off with a call to fn, or else the scheduler. 1855 // fn needs to be static and not a heap allocated closure. 1856 // May run with m.p==nil, so write barriers are not allowed. 1857 //go:nowritebarrierrec 1858 func newm(fn func(), _p_ *p) { 1859 mp := allocm(_p_, fn) 1860 mp.nextp.set(_p_) 1861 mp.sigmask = initSigmask 1862 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" { 1863 // We're on a locked M or a thread that may have been 1864 // started by C. The kernel state of this thread may 1865 // be strange (the user may have locked it for that 1866 // purpose). We don't want to clone that into another 1867 // thread. Instead, ask a known-good thread to create 1868 // the thread for us. 1869 // 1870 // This is disabled on Plan 9. See golang.org/issue/22227. 1871 // 1872 // TODO: This may be unnecessary on Windows, which 1873 // doesn't model thread creation off fork. 
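// The extram word manipulated by lockextra/unlockextra above doubles as the
// list head and its lock: 0 means the list is empty, 1 means it is locked, any
// other value is the *m at the head. A sketch of the same pointer-as-lock idea
// in ordinary Go (hypothetical names; sync/atomic instead of
// runtime/internal/atomic, runtime.Gosched standing in for osyield; note that
// outside the runtime a uintptr does not keep the node alive for the GC):
//
//	const listLocked = 1
//
//	var head uintptr // 0 = empty, 1 = locked, otherwise a pointer to the first node
//
//	func lockList() uintptr {
//		for {
//			old := atomic.LoadUintptr(&head)
//			if old == listLocked {
//				runtime.Gosched()
//				continue
//			}
//			if atomic.CompareAndSwapUintptr(&head, old, listLocked) {
//				return old // caller owns the list until it stores a new head
//			}
//		}
//	}
//
//	func unlockList(newHead uintptr) {
//		atomic.StoreUintptr(&head, newHead)
//	}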
1874 lock(&newmHandoff.lock) 1875 if newmHandoff.haveTemplateThread == 0 { 1876 throw("on a locked thread with no template thread") 1877 } 1878 mp.schedlink = newmHandoff.newm 1879 newmHandoff.newm.set(mp) 1880 if newmHandoff.waiting { 1881 newmHandoff.waiting = false 1882 notewakeup(&newmHandoff.wake) 1883 } 1884 unlock(&newmHandoff.lock) 1885 return 1886 } 1887 newm1(mp) 1888 } 1889 1890 func newm1(mp *m) { 1891 if iscgo { 1892 var ts cgothreadstart 1893 if _cgo_thread_start == nil { 1894 throw("_cgo_thread_start missing") 1895 } 1896 ts.g.set(mp.g0) 1897 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0])) 1898 ts.fn = unsafe.Pointer(funcPC(mstart)) 1899 if msanenabled { 1900 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts)) 1901 } 1902 execLock.rlock() // Prevent process clone. 1903 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts)) 1904 execLock.runlock() 1905 return 1906 } 1907 execLock.rlock() // Prevent process clone. 1908 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi)) 1909 execLock.runlock() 1910 } 1911 1912 // startTemplateThread starts the template thread if it is not already 1913 // running. 1914 // 1915 // The calling thread must itself be in a known-good state. 1916 func startTemplateThread() { 1917 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { 1918 return 1919 } 1920 newm(templateThread, nil) 1921 } 1922 1923 // tmeplateThread is a thread in a known-good state that exists solely 1924 // to start new threads in known-good states when the calling thread 1925 // may not be a a good state. 1926 // 1927 // Many programs never need this, so templateThread is started lazily 1928 // when we first enter a state that might lead to running on a thread 1929 // in an unknown state. 1930 // 1931 // templateThread runs on an M without a P, so it must not have write 1932 // barriers. 1933 // 1934 //go:nowritebarrierrec 1935 func templateThread() { 1936 lock(&sched.lock) 1937 sched.nmsys++ 1938 checkdead() 1939 unlock(&sched.lock) 1940 1941 for { 1942 lock(&newmHandoff.lock) 1943 for newmHandoff.newm != 0 { 1944 newm := newmHandoff.newm.ptr() 1945 newmHandoff.newm = 0 1946 unlock(&newmHandoff.lock) 1947 for newm != nil { 1948 next := newm.schedlink.ptr() 1949 newm.schedlink = 0 1950 newm1(newm) 1951 newm = next 1952 } 1953 lock(&newmHandoff.lock) 1954 } 1955 newmHandoff.waiting = true 1956 noteclear(&newmHandoff.wake) 1957 unlock(&newmHandoff.lock) 1958 notesleep(&newmHandoff.wake) 1959 } 1960 } 1961 1962 // Stops execution of the current m until new work is available. 1963 // Returns with acquired P. 1964 func stopm() { 1965 _g_ := getg() 1966 1967 if _g_.m.locks != 0 { 1968 throw("stopm holding locks") 1969 } 1970 if _g_.m.p != 0 { 1971 throw("stopm holding p") 1972 } 1973 if _g_.m.spinning { 1974 throw("stopm spinning") 1975 } 1976 1977 retry: 1978 lock(&sched.lock) 1979 mput(_g_.m) 1980 unlock(&sched.lock) 1981 notesleep(&_g_.m.park) 1982 noteclear(&_g_.m.park) 1983 if _g_.m.helpgc != 0 { 1984 // helpgc() set _g_.m.p and _g_.m.mcache, so we have a P. 1985 gchelper() 1986 // Undo the effects of helpgc(). 1987 _g_.m.helpgc = 0 1988 _g_.m.mcache = nil 1989 _g_.m.p = 0 1990 goto retry 1991 } 1992 acquirep(_g_.m.nextp.ptr()) 1993 _g_.m.nextp = 0 1994 } 1995 1996 func mspinning() { 1997 // startm's caller incremented nmspinning. Set the new M's spinning. 1998 getg().m.spinning = true 1999 } 2000 2001 // Schedules some M to run the p (creates an M if necessary). 2002 // 调度m来运行p 2003 // If p==nil, tries to get an idle P, if no idle P's does nothing. 
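// Shape-wise, templateThread above is a consumer loop over a lock-protected
// singly linked list that parks on a note while the list is empty. The same
// structure sketched in ordinary Go, with sync.Cond standing in for
// noteclear/notesleep/notewakeup (task, produce, consume are illustrative
// names, not runtime API):
//
//	type task struct{ next *task }
//
//	var (
//		mu      sync.Mutex
//		cond    = sync.NewCond(&mu)
//		pending *task
//	)
//
//	func produce(t *task) { // cf. the newmHandoff push in newm above
//		mu.Lock()
//		t.next = pending
//		pending = t
//		mu.Unlock()
//		cond.Signal() // notewakeup analogue
//	}
//
//	func consume(run func(*task)) { // cf. templateThread's for loop
//		for {
//			mu.Lock()
//			for pending == nil {
//				cond.Wait() // notesleep analogue
//			}
//			list := pending
//			pending = nil
//			mu.Unlock()
//			for t := list; t != nil; t = t.next {
//				run(t)
//			}
//		}
//	}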
2004 // May run with m.p==nil, so write barriers are not allowed. 2005 // If spinning is set, the caller has incremented nmspinning and startm will 2006 // either decrement nmspinning or set m.spinning in the newly started M. 2007 //go:nowritebarrierrec 2008 func startm(_p_ *p, spinning bool) { 2009 lock(&sched.lock) 2010 if _p_ == nil { 2011 _p_ = pidleget() 2012 if _p_ == nil { 2013 unlock(&sched.lock) 2014 if spinning { 2015 // The caller incremented nmspinning, but there are no idle Ps, 2016 // so it's okay to just undo the increment and give up. 2017 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2018 throw("startm: negative nmspinning") 2019 } 2020 } 2021 return 2022 } 2023 } 2024 mp := mget() 2025 unlock(&sched.lock) 2026 if mp == nil { 2027 var fn func() 2028 if spinning { 2029 // The caller incremented nmspinning, so set m.spinning in the new M. 2030 fn = mspinning 2031 } 2032 newm(fn, _p_) 2033 return 2034 } 2035 if mp.spinning { 2036 throw("startm: m is spinning") 2037 } 2038 if mp.nextp != 0 { 2039 throw("startm: m has p") 2040 } 2041 if spinning && !runqempty(_p_) { 2042 throw("startm: p has runnable gs") 2043 } 2044 // The caller incremented nmspinning, so set m.spinning in the new M. 2045 mp.spinning = spinning 2046 mp.nextp.set(_p_) 2047 notewakeup(&mp.park) 2048 } 2049 2050 // Hands off P from syscall or locked M. 2051 // Always runs without a P, so write barriers are not allowed. 2052 //go:nowritebarrierrec 2053 func handoffp(_p_ *p) { 2054 // handoffp must start an M in any situation where 2055 // findrunnable would return a G to run on _p_. 2056 2057 // if it has local work, start it straight away 2058 // 若有可运行的g, 就直接拿一个m来运行这个p 2059 if !runqempty(_p_) || sched.runqsize != 0 { 2060 startm(_p_, false) 2061 return 2062 } 2063 // if it has GC work, start it straight away 2064 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) { 2065 startm(_p_, false) 2066 return 2067 } 2068 // no local work, check that there are no spinning/idle M's, 2069 // otherwise our help is not required 2070 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic 2071 startm(_p_, true) 2072 return 2073 } 2074 lock(&sched.lock) 2075 if sched.gcwaiting != 0 { 2076 _p_.status = _Pgcstop 2077 sched.stopwait-- 2078 if sched.stopwait == 0 { 2079 notewakeup(&sched.stopnote) 2080 } 2081 unlock(&sched.lock) 2082 return 2083 } 2084 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) { 2085 sched.safePointFn(_p_) 2086 sched.safePointWait-- 2087 if sched.safePointWait == 0 { 2088 notewakeup(&sched.safePointNote) 2089 } 2090 } 2091 if sched.runqsize != 0 { 2092 unlock(&sched.lock) 2093 startm(_p_, false) 2094 return 2095 } 2096 // If this is the last running P and nobody is polling network, 2097 // need to wakeup another M to poll network. 2098 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 { 2099 unlock(&sched.lock) 2100 startm(_p_, false) 2101 return 2102 } 2103 pidleput(_p_) 2104 unlock(&sched.lock) 2105 } 2106 2107 // Tries to add one more P to execute G's. 2108 // Called when a G is made runnable (newproc, ready). 2109 func wakep() { 2110 // be conservative about spinning threads 2111 // 多线程(m)同时调用产生新m时只有一个真的创建m 2112 if !atomic.Cas(&sched.nmspinning, 0, 1) { 2113 return 2114 } 2115 startm(nil, true) 2116 } 2117 2118 // Stops execution of the current m that is locked to a g until the g is runnable again. 2119 // Returns with acquired P. 
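// The handoffp path above is what keeps a program making progress when a
// goroutine blocks in a real system call. A small Linux-only demonstration
// sketch (GOMAXPROCS=1, one goroutine blocked in nanosleep(2)): sysmon notices
// the long syscall, retakes the P, and handoffp/startm give it to another M,
// so the printing loop keeps running.
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//		"syscall"
//		"time"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		go func() {
//			ts := syscall.Timespec{Sec: 1}
//			syscall.Nanosleep(&ts, nil) // M blocks in the kernel, G is in _Gsyscall
//		}()
//		for i := 0; i < 5; i++ {
//			fmt.Println("still making progress", i) // runs even with GOMAXPROCS=1
//			time.Sleep(100 * time.Millisecond)
//		}
//	}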
2120 func stoplockedm() { 2121 _g_ := getg() 2122 2123 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m { 2124 throw("stoplockedm: inconsistent locking") 2125 } 2126 if _g_.m.p != 0 { 2127 // Schedule another M to run this p. 2128 _p_ := releasep() 2129 handoffp(_p_) 2130 } 2131 incidlelocked(1) 2132 // Wait until another thread schedules lockedg again. 2133 notesleep(&_g_.m.park) 2134 noteclear(&_g_.m.park) 2135 status := readgstatus(_g_.m.lockedg.ptr()) 2136 if status&^_Gscan != _Grunnable { 2137 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n") 2138 dumpgstatus(_g_) 2139 throw("stoplockedm: not runnable") 2140 } 2141 acquirep(_g_.m.nextp.ptr()) 2142 _g_.m.nextp = 0 2143 } 2144 2145 // Schedules the locked m to run the locked gp. 2146 // May run during STW, so write barriers are not allowed. 2147 //go:nowritebarrierrec 2148 func startlockedm(gp *g) { 2149 _g_ := getg() 2150 2151 mp := gp.lockedm.ptr() 2152 if mp == _g_.m { 2153 throw("startlockedm: locked to me") 2154 } 2155 if mp.nextp != 0 { 2156 throw("startlockedm: m has p") 2157 } 2158 // directly handoff current P to the locked m 2159 incidlelocked(-1) 2160 _p_ := releasep() 2161 mp.nextp.set(_p_) 2162 notewakeup(&mp.park) 2163 stopm() 2164 } 2165 2166 // Stops the current m for stopTheWorld. 2167 // Returns when the world is restarted. 2168 func gcstopm() { 2169 _g_ := getg() 2170 2171 if sched.gcwaiting == 0 { 2172 throw("gcstopm: not waiting for gc") 2173 } 2174 if _g_.m.spinning { 2175 _g_.m.spinning = false 2176 // OK to just drop nmspinning here, 2177 // startTheWorld will unpark threads as necessary. 2178 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2179 throw("gcstopm: negative nmspinning") 2180 } 2181 } 2182 _p_ := releasep() 2183 lock(&sched.lock) 2184 _p_.status = _Pgcstop 2185 sched.stopwait-- 2186 if sched.stopwait == 0 { 2187 notewakeup(&sched.stopnote) 2188 } 2189 unlock(&sched.lock) 2190 stopm() 2191 } 2192 2193 // Schedules gp to run on the current M. 2194 // If inheritTime is true, gp inherits the remaining time in the 2195 // current time slice. Otherwise, it starts a new time slice. 2196 // Never returns. 2197 // 2198 // Write barriers are allowed because this is called immediately after 2199 // acquiring a P in several places. 2200 // 2201 //go:yeswritebarrierrec 2202 func execute(gp *g, inheritTime bool) { 2203 _g_ := getg() 2204 2205 casgstatus(gp, _Grunnable, _Grunning) 2206 gp.waitsince = 0 2207 gp.preempt = false 2208 gp.stackguard0 = gp.stack.lo + _StackGuard 2209 if !inheritTime { 2210 _g_.m.p.ptr().schedtick++ 2211 } 2212 _g_.m.curg = gp 2213 gp.m = _g_.m 2214 2215 // Check whether the profiler needs to be turned on or off. 2216 hz := sched.profilehz 2217 if _g_.m.profilehz != hz { 2218 setThreadCPUProfiler(hz) 2219 } 2220 2221 if trace.enabled { 2222 // GoSysExit has to happen when we have a P, but before GoStart. 2223 // So we emit it here. 2224 if gp.syscallsp != 0 && gp.sysblocktraced { 2225 traceGoSysExit(gp.sysexitticks) 2226 } 2227 traceGoStart() 2228 } 2229 2230 gogo(&gp.sched) 2231 } 2232 2233 // Finds a runnable goroutine to execute. 2234 // Tries to steal from other P's, get g from global queue, poll network. 2235 // 找一个可执行的goroutine执行 2236 // 尝试从其他p窃取, 从全局队列拿, 从poll network拿到poll到的goroutine(这个goroutine可被执行了) 2237 func findrunnable() (gp *g, inheritTime bool) { 2238 _g_ := getg() 2239 2240 // The conditions here and in handoffp must agree: if 2241 // findrunnable would return a G to run, handoffp must start 2242 // an M. 
2243 2244 top: 2245 _p_ := _g_.m.p.ptr() 2246 if sched.gcwaiting != 0 { 2247 gcstopm() 2248 goto top 2249 } 2250 if _p_.runSafePointFn != 0 { 2251 runSafePointFn() 2252 } 2253 if fingwait && fingwake { 2254 if gp := wakefing(); gp != nil { 2255 ready(gp, 0, true) 2256 } 2257 } 2258 if *cgo_yield != nil { 2259 asmcgocall(*cgo_yield, nil) 2260 } 2261 2262 // local runq 2263 if gp, inheritTime := runqget(_p_); gp != nil { 2264 return gp, inheritTime 2265 } 2266 2267 // global runq 2268 if sched.runqsize != 0 { 2269 lock(&sched.lock) 2270 gp := globrunqget(_p_, 0) 2271 unlock(&sched.lock) 2272 if gp != nil { 2273 return gp, false 2274 } 2275 } 2276 2277 // Poll network. 2278 // This netpoll is only an optimization before we resort to stealing. 2279 // 一个在采取工作窃取前的一个优化 2280 // We can safely skip it if there are no waiters or a thread is blocked 2281 // in netpoll already. If there is any kind of logical race with that 2282 // blocked thread (e.g. it has already returned from netpoll, but does 2283 // not set lastpoll yet), this thread will do blocking netpoll below 2284 // anyway. 2285 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 { 2286 if gp := netpoll(false); gp != nil { // non-blocking 2287 // netpoll returns list of goroutines linked by schedlink. 2288 // netpool 返回runnable goroutine的列表,通过g的schedlink链接 2289 injectglist(gp.schedlink.ptr()) // 将这个列表的第二个开始的全部插入全局g列表(第一个留作返回, 被这个m占用了) 2290 casgstatus(gp, _Gwaiting, _Grunnable) 2291 if trace.enabled { 2292 traceGoUnpark(gp, 0) 2293 } 2294 return gp, false 2295 } 2296 } 2297 2298 // Steal work from other P's. 2299 procs := uint32(gomaxprocs) 2300 if atomic.Load(&sched.npidle) == procs-1 { 2301 // Either GOMAXPROCS=1 or everybody, except for us, is idle already. 2302 // New work can appear from returning syscall/cgocall, network or timers. 2303 // Neither of that submits to local run queues, so no point in stealing. 2304 goto stop 2305 } 2306 // If number of spinning M's >= number of busy P's, block. 2307 // This is necessary to prevent excessive CPU consumption 2308 // when GOMAXPROCS>>1 but the program parallelism is low. 2309 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) { 2310 goto stop 2311 } 2312 if !_g_.m.spinning { 2313 _g_.m.spinning = true 2314 atomic.Xadd(&sched.nmspinning, 1) 2315 } 2316 for i := 0; i < 4; i++ { 2317 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { 2318 if sched.gcwaiting != 0 { 2319 goto top 2320 } 2321 stealRunNextG := i > 2 // first look for ready queues with more than 1 g 2322 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil { 2323 return gp, false 2324 } 2325 } 2326 } 2327 2328 stop: 2329 2330 // We have nothing to do. If we're in the GC mark phase, can 2331 // safely scan and blacken objects, and have work to do, run 2332 // idle-time marking rather than give up the P. 2333 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) { 2334 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode 2335 gp := _p_.gcBgMarkWorker.ptr() 2336 casgstatus(gp, _Gwaiting, _Grunnable) 2337 if trace.enabled { 2338 traceGoUnpark(gp, 0) 2339 } 2340 return gp, false 2341 } 2342 2343 // Before we drop our P, make a snapshot of the allp slice, 2344 // which can change underfoot once we no longer block 2345 // safe-points. We don't need to snapshot the contents because 2346 // everything up to cap(allp) is immutable. 
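// The stealing loop above walks the Ps in a pseudo-random order: stealOrder
// starts at a random position and steps by an increment coprime to the number
// of Ps, which visits every P exactly once. The underlying idea, stand-alone
// (gcd/visitAll are illustrative names; the runtime precomputes a table of
// coprime increments instead):
//
//	func gcd(a, b uint32) uint32 {
//		for b != 0 {
//			a, b = b, a%b
//		}
//		return a
//	}
//
//	// visitAll calls visit once for each index in [0, n), in an order
//	// determined by seed.
//	func visitAll(n, seed uint32, visit func(i uint32)) {
//		inc := seed%n + 1
//		for gcd(inc, n) != 1 {
//			inc++
//		}
//		pos := seed % n
//		for i := uint32(0); i < n; i++ {
//			visit(pos)
//			pos = (pos + inc) % n
//		}
//	}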
2347 allpSnapshot := allp 2348 2349 // return P and block 2350 lock(&sched.lock) 2351 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 { 2352 unlock(&sched.lock) 2353 goto top 2354 } 2355 if sched.runqsize != 0 { 2356 gp := globrunqget(_p_, 0) 2357 unlock(&sched.lock) 2358 return gp, false 2359 } 2360 if releasep() != _p_ { 2361 throw("findrunnable: wrong p") 2362 } 2363 pidleput(_p_) 2364 unlock(&sched.lock) 2365 2366 // Delicate dance: thread transitions from spinning to non-spinning state, 2367 // potentially concurrently with submission of new goroutines. We must 2368 // drop nmspinning first and then check all per-P queues again (with 2369 // #StoreLoad memory barrier in between). If we do it the other way around, 2370 // another thread can submit a goroutine after we've checked all run queues 2371 // but before we drop nmspinning; as the result nobody will unpark a thread 2372 // to run the goroutine. 2373 // If we discover new work below, we need to restore m.spinning as a signal 2374 // for resetspinning to unpark a new worker thread (because there can be more 2375 // than one starving goroutine). However, if after discovering new work 2376 // we also observe no idle Ps, it is OK to just park the current thread: 2377 // the system is fully loaded so no spinning threads are required. 2378 // Also see "Worker thread parking/unparking" comment at the top of the file. 2379 wasSpinning := _g_.m.spinning 2380 if _g_.m.spinning { 2381 _g_.m.spinning = false 2382 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 { 2383 throw("findrunnable: negative nmspinning") 2384 } 2385 } 2386 2387 // check all runqueues once again 2388 for _, _p_ := range allpSnapshot { 2389 if !runqempty(_p_) { 2390 lock(&sched.lock) 2391 _p_ = pidleget() 2392 unlock(&sched.lock) 2393 if _p_ != nil { 2394 acquirep(_p_) 2395 if wasSpinning { 2396 _g_.m.spinning = true 2397 atomic.Xadd(&sched.nmspinning, 1) 2398 } 2399 goto top 2400 } 2401 break 2402 } 2403 } 2404 2405 // Check for idle-priority GC work again. 2406 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) { 2407 lock(&sched.lock) 2408 _p_ = pidleget() 2409 if _p_ != nil && _p_.gcBgMarkWorker == 0 { 2410 pidleput(_p_) 2411 _p_ = nil 2412 } 2413 unlock(&sched.lock) 2414 if _p_ != nil { 2415 acquirep(_p_) 2416 if wasSpinning { 2417 _g_.m.spinning = true 2418 atomic.Xadd(&sched.nmspinning, 1) 2419 } 2420 // Go back to idle GC check. 2421 goto stop 2422 } 2423 } 2424 2425 // poll network 2426 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 { 2427 if _g_.m.p != 0 { 2428 throw("findrunnable: netpoll with p") 2429 } 2430 if _g_.m.spinning { 2431 throw("findrunnable: netpoll with spinning") 2432 } 2433 gp := netpoll(true) // block until new work is available 2434 atomic.Store64(&sched.lastpoll, uint64(nanotime())) 2435 if gp != nil { 2436 lock(&sched.lock) 2437 _p_ = pidleget() 2438 unlock(&sched.lock) 2439 if _p_ != nil { 2440 acquirep(_p_) 2441 injectglist(gp.schedlink.ptr()) 2442 casgstatus(gp, _Gwaiting, _Grunnable) 2443 if trace.enabled { 2444 traceGoUnpark(gp, 0) 2445 } 2446 return gp, false 2447 } 2448 injectglist(gp) 2449 } 2450 } 2451 stopm() 2452 goto top 2453 } 2454 2455 // pollWork returns true if there is non-background work this P could 2456 // be doing. This is a fairly lightweight check to be used for 2457 // background work loops, like idle GC. It checks a subset of the 2458 // conditions checked by the actual scheduler. 
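// The "delicate dance" above boils down to two mirrored sequences, each of
// which must perform its store before its load. In pseudocode, with
// hypothetical helpers (pushLocal, recheckAllQueues, wakeWorker, run, park);
// the real code relies on the #StoreLoad ordering of its atomics:
//
//	// Readying side (cf. the pattern described at the top of the file):
//	pushLocal(gp)                      // store: publish the work
//	if atomic.Load(&nmspinning) == 0 { // load, ordered after the store
//		wakeWorker()
//	}
//
//	// Idling side (cf. the recheck loops above in findrunnable):
//	atomic.Xadd(&nmspinning, -1)             // store: give up spinning
//	if gp := recheckAllQueues(); gp != nil { // loads, ordered after the store
//		atomic.Xadd(&nmspinning, +1)
//		run(gp)
//	} else {
//		park()
//	}
//
// If the idling side checked the queues before dropping nmspinning, a
// goroutine submitted in between would see nmspinning > 0 and wake nobody,
// leaving runnable work with no thread to run it.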
2459 func pollWork() bool { 2460 if sched.runqsize != 0 { 2461 return true 2462 } 2463 p := getg().m.p.ptr() 2464 if !runqempty(p) { 2465 return true 2466 } 2467 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 { 2468 if gp := netpoll(false); gp != nil { 2469 injectglist(gp) 2470 return true 2471 } 2472 } 2473 return false 2474 } 2475 2476 func resetspinning() { 2477 _g_ := getg() 2478 if !_g_.m.spinning { 2479 throw("resetspinning: not a spinning m") 2480 } 2481 _g_.m.spinning = false 2482 nmspinning := atomic.Xadd(&sched.nmspinning, -1) 2483 if int32(nmspinning) < 0 { 2484 throw("findrunnable: negative nmspinning") 2485 } 2486 // M wakeup policy is deliberately somewhat conservative, so check if we 2487 // need to wakeup another P here. See "Worker thread parking/unparking" 2488 // comment at the top of the file for details. 2489 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 { 2490 wakep() 2491 } 2492 } 2493 2494 // Injects the list of runnable G's into the scheduler. 2495 // Can run concurrently with GC. 2496 func injectglist(glist *g) { 2497 if glist == nil { 2498 return 2499 } 2500 if trace.enabled { 2501 for gp := glist; gp != nil; gp = gp.schedlink.ptr() { 2502 traceGoUnpark(gp, 0) 2503 } 2504 } 2505 lock(&sched.lock) 2506 var n int 2507 for n = 0; glist != nil; n++ { 2508 gp := glist 2509 glist = gp.schedlink.ptr() 2510 casgstatus(gp, _Gwaiting, _Grunnable) 2511 globrunqput(gp) 2512 } 2513 unlock(&sched.lock) 2514 for ; n != 0 && sched.npidle != 0; n-- { 2515 startm(nil, false) 2516 } 2517 } 2518 2519 // One round of scheduler: find a runnable goroutine and execute it. 2520 // 一轮调度: 找到一个状态为runnable的goroutine并执行它 2521 // Never returns. 2522 func schedule() { 2523 _g_ := getg() 2524 2525 if _g_.m.locks != 0 { 2526 throw("schedule: holding locks") 2527 } 2528 2529 if _g_.m.lockedg != 0 { 2530 stoplockedm() 2531 execute(_g_.m.lockedg.ptr(), false) // Never returns. 2532 } 2533 2534 // We should not schedule away from a g that is executing a cgo call, 2535 // since the cgo call is using the m's g0 stack. 2536 if _g_.m.incgo { 2537 throw("schedule: in cgo") 2538 } 2539 2540 top: 2541 if sched.gcwaiting != 0 { 2542 gcstopm() 2543 goto top 2544 } 2545 if _g_.m.p.ptr().runSafePointFn != 0 { 2546 runSafePointFn() 2547 } 2548 2549 var gp *g 2550 var inheritTime bool 2551 if trace.enabled || trace.shutdown { 2552 gp = traceReader() 2553 if gp != nil { 2554 casgstatus(gp, _Gwaiting, _Grunnable) 2555 traceGoUnpark(gp, 0) 2556 } 2557 } 2558 if gp == nil && gcBlackenEnabled != 0 { 2559 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr()) 2560 } 2561 if gp == nil { 2562 // Check the global runnable queue once in a while to ensure fairness. 2563 // 时不时检查全局runnable队列以确保公平 2564 // Otherwise two goroutines can completely occupy the local runqueue 2565 // by constantly respawning each other. 
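// A runnable illustration of exactly this scenario (hog, a, b are illustrative
// names). With GOMAXPROCS=1 the two hogs keep waking each other through the
// P's local run queue, while main, which runtime.Gosched places on the global
// run queue (goschedImpl -> globrunqput below), only gets the P back through
// the 1-in-61 global-queue check below:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		runtime.GOMAXPROCS(1)
//		a, b := make(chan struct{}, 1), make(chan struct{}, 1)
//		hog := func(in, out chan struct{}) {
//			for {
//				<-in
//				out <- struct{}{}
//			}
//		}
//		go hog(a, b)
//		go hog(b, a)
//		a <- struct{}{}
//		for i := 0; i < 3; i++ {
//			runtime.Gosched() // main goes to the global run queue
//			fmt.Println("main ran again", i)
//		}
//	}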
2566 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 { 2567 lock(&sched.lock) 2568 gp = globrunqget(_g_.m.p.ptr(), 1) 2569 unlock(&sched.lock) 2570 } 2571 } 2572 if gp == nil { 2573 // 从p的本地队列里拿 2574 gp, inheritTime = runqget(_g_.m.p.ptr()) 2575 if gp != nil && _g_.m.spinning { 2576 throw("schedule: spinning with local work") 2577 } 2578 } 2579 // 上面的步骤没能拿到可运行g, 会尝试更复杂的方法拿g(包括窃取) 2580 if gp == nil { 2581 // 以上都没找到 2582 gp, inheritTime = findrunnable() // blocks until work is available 2583 } 2584 2585 // This thread is going to run a goroutine and is not spinning anymore, 2586 // so if it was marked as spinning we need to reset it now and potentially 2587 // start a new spinning M. 2588 if _g_.m.spinning { 2589 resetspinning() 2590 } 2591 2592 if gp.lockedm != 0 { 2593 // Hands off own p to the locked m, 2594 // then blocks waiting for a new p. 2595 startlockedm(gp) 2596 goto top 2597 } 2598 2599 execute(gp, inheritTime) 2600 } 2601 2602 // dropg removes the association between m and the current goroutine m->curg (gp for short). 2603 // 解除m和m->curg的关系 2604 // Typically a caller sets gp's status away from Grunning and then 2605 // immediately calls dropg to finish the job. The caller is also responsible 2606 // for arranging that gp will be restarted using ready at an 2607 // appropriate time. After calling dropg and arranging for gp to be 2608 // readied later, the caller can do other work but eventually should 2609 // call schedule to restart the scheduling of goroutines on this m. 2610 func dropg() { 2611 _g_ := getg() 2612 2613 setMNoWB(&_g_.m.curg.m, nil) 2614 setGNoWB(&_g_.m.curg, nil) 2615 } 2616 2617 func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 2618 unlock((*mutex)(lock)) 2619 return true 2620 } 2621 2622 // park continuation on g0. 2623 // 在g0上停止这个g的执行 2624 func park_m(gp *g) { 2625 _g_ := getg() // 这个_g_是m的g0 2626 2627 if trace.enabled { 2628 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip) 2629 } 2630 2631 casgstatus(gp, _Grunning, _Gwaiting) 2632 dropg() 2633 2634 if _g_.m.waitunlockf != nil { 2635 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf)) 2636 ok := fn(gp, _g_.m.waitlock) 2637 _g_.m.waitunlockf = nil 2638 _g_.m.waitlock = nil 2639 if !ok { 2640 if trace.enabled { 2641 traceGoUnpark(gp, 2) 2642 } 2643 casgstatus(gp, _Gwaiting, _Grunnable) 2644 execute(gp, true) // Schedule it back, never returns. 2645 } 2646 } 2647 schedule() 2648 } 2649 2650 func goschedImpl(gp *g) { 2651 status := readgstatus(gp) 2652 if status&^_Gscan != _Grunning { 2653 dumpgstatus(gp) 2654 throw("bad g status") 2655 } 2656 casgstatus(gp, _Grunning, _Grunnable) 2657 dropg() 2658 lock(&sched.lock) 2659 globrunqput(gp) 2660 unlock(&sched.lock) 2661 2662 schedule() 2663 } 2664 2665 // Gosched continuation on g0. 2666 func gosched_m(gp *g) { 2667 if trace.enabled { 2668 traceGoSched() 2669 } 2670 goschedImpl(gp) 2671 } 2672 2673 // goschedguarded is a forbidden-states-avoided version of gosched_m 2674 func goschedguarded_m(gp *g) { 2675 2676 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning { 2677 gogo(&gp.sched) // never return 2678 } 2679 2680 if trace.enabled { 2681 traceGoSched() 2682 } 2683 goschedImpl(gp) 2684 } 2685 2686 func gopreempt_m(gp *g) { 2687 if trace.enabled { 2688 traceGoPreempt() 2689 } 2690 goschedImpl(gp) 2691 } 2692 2693 // Finishes execution of the current goroutine. 
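// For orientation: runtime.Gosched is the exported entry point that lands in
// gosched_m/goschedImpl above (note that it uses globrunqput, i.e. the global
// queue), gopreempt_m is reached when a preemption request set via
// stackguard0 = stackPreempt trips a stack check, and a goroutine's final
// return goes through goexit1/goexit0 below. Since preemption in this runtime
// is only checked at function calls, a call-free loop has to yield by hand to
// let anything else run on its P, e.g. (done is an illustrative flag):
//
//	for atomic.LoadUint32(&done) == 0 {
//		runtime.Gosched() // cooperative yield; ends up in gosched_m above
//	}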
2694 func goexit1() { 2695 if raceenabled { 2696 racegoend() 2697 } 2698 if trace.enabled { 2699 traceGoEnd() 2700 } 2701 mcall(goexit0) 2702 } 2703 2704 // goexit continuation on g0. 2705 // 在g0上继续 2706 func goexit0(gp *g) { 2707 _g_ := getg() // g0 2708 2709 casgstatus(gp, _Grunning, _Gdead) 2710 if isSystemGoroutine(gp) { 2711 atomic.Xadd(&sched.ngsys, -1) 2712 } 2713 gp.m = nil 2714 locked := gp.lockedm != 0 2715 gp.lockedm = 0 2716 _g_.m.lockedg = 0 2717 gp.paniconfault = false 2718 gp._defer = nil // should be true already but just in case. 2719 gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 2720 gp.writebuf = nil 2721 gp.waitreason = "" 2722 gp.param = nil 2723 gp.labels = nil 2724 gp.timer = nil 2725 2726 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { 2727 // Flush assist credit to the global pool. This gives 2728 // better information to pacing if the application is 2729 // rapidly creating an exiting goroutines. 2730 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes)) 2731 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit) 2732 gp.gcAssistBytes = 0 2733 } 2734 2735 // Note that gp's stack scan is now "valid" because it has no 2736 // stack. 2737 gp.gcscanvalid = true 2738 dropg() // m.curg似乎永远不会指向g0, 每次schedule都是指向一个运行的用户g, 这里解除m和curg的关系 2739 2740 if _g_.m.lockedInt != 0 { 2741 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n") 2742 throw("internal lockOSThread error") 2743 } 2744 _g_.m.lockedExt = 0 2745 gfput(_g_.m.p.ptr(), gp) 2746 if locked { 2747 // The goroutine may have locked this thread because 2748 // it put it in an unusual kernel state. Kill it 2749 // rather than returning it to the thread pool. 2750 2751 // Return to mstart, which will release the P and exit 2752 // the thread. 2753 if GOOS != "plan9" { // See golang.org/issue/22227. 2754 gogo(&_g_.m.g0.sched) 2755 } 2756 } 2757 schedule() 2758 } 2759 2760 // save updates getg().sched to refer to pc and sp so that a following 2761 // gogo will restore pc and sp. 2762 // 2763 // save must not have write barriers because invoking a write barrier 2764 // can clobber getg().sched. 2765 // 2766 //go:nosplit 2767 //go:nowritebarrierrec 2768 func save(pc, sp uintptr) { 2769 _g_ := getg() 2770 2771 _g_.sched.pc = pc 2772 _g_.sched.sp = sp 2773 _g_.sched.lr = 0 2774 _g_.sched.ret = 0 2775 _g_.sched.g = guintptr(unsafe.Pointer(_g_)) 2776 // We need to ensure ctxt is zero, but can't have a write 2777 // barrier here. However, it should always already be zero. 2778 // Assert that. 2779 if _g_.sched.ctxt != nil { 2780 badctxt() 2781 } 2782 } 2783 2784 // The goroutine g is about to enter a system call. 2785 // Record that it's not using the cpu anymore. 2786 // This is called only from the go syscall library and cgocall, 2787 // not from the low-level system calls used by the runtime. 2788 // 2789 // Entersyscall cannot split the stack: the gosave must 2790 // make g->sched refer to the caller's stack segment, because 2791 // entersyscall is going to return immediately after. 2792 // 2793 // Nothing entersyscall calls can split the stack either. 2794 // We cannot safely move the stack during an active call to syscall, 2795 // because we do not know which of the uintptr arguments are 2796 // really pointers (back into the stack). 2797 // In practice, this means that we make the fast path run through 2798 // entersyscall doing no-split things, and the slow path has to use systemstack 2799 // to run bigger things on the system stack. 
2800 // 2801 // reentersyscall is the entry point used by cgo callbacks, where explicitly 2802 // saved SP and PC are restored. This is needed when exitsyscall will be called 2803 // from a function further up in the call stack than the parent, as g->syscallsp 2804 // must always point to a valid stack frame. entersyscall below is the normal 2805 // entry point for syscalls, which obtains the SP and PC from the caller. 2806 // 2807 // Syscall tracing: 2808 // At the start of a syscall we emit traceGoSysCall to capture the stack trace. 2809 // If the syscall does not block, that is it, we do not emit any other events. 2810 // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock; 2811 // when syscall returns we emit traceGoSysExit and when the goroutine starts running 2812 // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart. 2813 // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock, 2814 // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick), 2815 // whoever emits traceGoSysBlock increments p.syscalltick afterwards; 2816 // and we wait for the increment before emitting traceGoSysExit. 2817 // Note that the increment is done even if tracing is not enabled, 2818 // because tracing can be enabled in the middle of syscall. We don't want the wait to hang. 2819 // 2820 //go:nosplit 2821 func reentersyscall(pc, sp uintptr) { 2822 _g_ := getg() 2823 2824 // Disable preemption because during this function g is in Gsyscall status, 2825 // but can have inconsistent g->sched, do not let GC observe it. 2826 _g_.m.locks++ 2827 2828 // Entersyscall must not call any function that might split/grow the stack. 2829 // (See details in comment above.) 2830 // Catch calls that might, by replacing the stack guard with something that 2831 // will trip any stack check and leaving a flag to tell newstack to die. 2832 _g_.stackguard0 = stackPreempt 2833 _g_.throwsplit = true 2834 2835 // Leave SP around for GC and traceback. 2836 save(pc, sp) 2837 _g_.syscallsp = sp 2838 _g_.syscallpc = pc 2839 casgstatus(_g_, _Grunning, _Gsyscall) 2840 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2841 systemstack(func() { 2842 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2843 throw("entersyscall") 2844 }) 2845 } 2846 2847 if trace.enabled { 2848 systemstack(traceGoSysCall) 2849 // systemstack itself clobbers g.sched.{pc,sp} and we might 2850 // need them later when the G is genuinely blocked in a 2851 // syscall 2852 save(pc, sp) 2853 } 2854 2855 if atomic.Load(&sched.sysmonwait) != 0 { 2856 systemstack(entersyscall_sysmon) 2857 save(pc, sp) 2858 } 2859 2860 if _g_.m.p.ptr().runSafePointFn != 0 { 2861 // runSafePointFn may stack split if run on this stack 2862 systemstack(runSafePointFn) 2863 save(pc, sp) 2864 } 2865 2866 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2867 _g_.sysblocktraced = true 2868 _g_.m.mcache = nil 2869 _g_.m.p.ptr().m = 0 2870 atomic.Store(&_g_.m.p.ptr().status, _Psyscall) 2871 if sched.gcwaiting != 0 { 2872 systemstack(entersyscall_gcwait) 2873 save(pc, sp) 2874 } 2875 2876 // Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched). 2877 // We set _StackGuard to StackPreempt so that first split stack check calls morestack. 2878 // Morestack detects this case and throws. 
2879 _g_.stackguard0 = stackPreempt 2880 _g_.m.locks-- 2881 } 2882 2883 // Standard syscall entry used by the go syscall library and normal cgo calls. 2884 //go:nosplit 2885 func entersyscall(dummy int32) { 2886 reentersyscall(getcallerpc(), getcallersp(unsafe.Pointer(&dummy))) 2887 } 2888 2889 func entersyscall_sysmon() { 2890 lock(&sched.lock) 2891 if atomic.Load(&sched.sysmonwait) != 0 { 2892 atomic.Store(&sched.sysmonwait, 0) 2893 notewakeup(&sched.sysmonnote) 2894 } 2895 unlock(&sched.lock) 2896 } 2897 2898 func entersyscall_gcwait() { 2899 _g_ := getg() 2900 _p_ := _g_.m.p.ptr() 2901 2902 lock(&sched.lock) 2903 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) { 2904 if trace.enabled { 2905 traceGoSysBlock(_p_) 2906 traceProcStop(_p_) 2907 } 2908 _p_.syscalltick++ 2909 if sched.stopwait--; sched.stopwait == 0 { 2910 notewakeup(&sched.stopnote) 2911 } 2912 } 2913 unlock(&sched.lock) 2914 } 2915 2916 // The same as entersyscall(), but with a hint that the syscall is blocking. 2917 //go:nosplit 2918 func entersyscallblock(dummy int32) { 2919 _g_ := getg() 2920 2921 _g_.m.locks++ // see comment in entersyscall 2922 _g_.throwsplit = true 2923 _g_.stackguard0 = stackPreempt // see comment in entersyscall 2924 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick 2925 _g_.sysblocktraced = true 2926 _g_.m.p.ptr().syscalltick++ 2927 2928 // Leave SP around for GC and traceback. 2929 pc := getcallerpc() 2930 sp := getcallersp(unsafe.Pointer(&dummy)) 2931 save(pc, sp) 2932 _g_.syscallsp = _g_.sched.sp 2933 _g_.syscallpc = _g_.sched.pc 2934 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2935 sp1 := sp 2936 sp2 := _g_.sched.sp 2937 sp3 := _g_.syscallsp 2938 systemstack(func() { 2939 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2940 throw("entersyscallblock") 2941 }) 2942 } 2943 casgstatus(_g_, _Grunning, _Gsyscall) 2944 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp { 2945 systemstack(func() { 2946 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n") 2947 throw("entersyscallblock") 2948 }) 2949 } 2950 2951 systemstack(entersyscallblock_handoff) 2952 2953 // Resave for traceback during blocked call. 2954 save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy))) 2955 2956 _g_.m.locks-- 2957 } 2958 2959 func entersyscallblock_handoff() { 2960 if trace.enabled { 2961 traceGoSysCall() 2962 traceGoSysBlock(getg().m.p.ptr()) 2963 } 2964 handoffp(releasep()) 2965 } 2966 2967 // The goroutine g exited its system call. 2968 // Arrange for it to run on a cpu again. 2969 // This is called only from the go syscall library, not 2970 // from the low-level system calls used by the runtime. 2971 // 2972 // Write barriers are not allowed because our P may have been stolen. 2973 // 2974 //go:nosplit 2975 //go:nowritebarrierrec 2976 func exitsyscall(dummy int32) { 2977 _g_ := getg() 2978 2979 _g_.m.locks++ // see comment in entersyscall 2980 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp { 2981 // throw calls print which may try to grow the stack, 2982 // but throwsplit == true so the stack can not be grown; 2983 // use systemstack to avoid that possible problem. 
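// The functions above are reached from the syscall package: the assembly
// implementation of syscall.Syscall brackets the raw trap with entersyscall
// and exitsyscall, and the known-blocking wrappers use entersyscallblock
// instead. Roughly, in pseudocode (rawTrap is an illustrative stand-in for the
// SYSCALL instruction, not a real function):
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, errno uintptr) {
//		entersyscall()  // P -> _Psyscall, G -> _Gsyscall
//		r1, r2, errno = rawTrap(trap, a1, a2, a3)
//		exitsyscall()   // try to get a P back and keep running
//		return
//	}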
2984 systemstack(func() { 2985 throw("exitsyscall: syscall frame is no longer valid") 2986 }) 2987 } 2988 2989 _g_.waitsince = 0 2990 oldp := _g_.m.p.ptr() 2991 if exitsyscallfast() { 2992 if _g_.m.mcache == nil { 2993 systemstack(func() { 2994 throw("lost mcache") 2995 }) 2996 } 2997 if trace.enabled { 2998 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 2999 systemstack(traceGoStart) 3000 } 3001 } 3002 // There's a cpu for us, so we can run. 3003 _g_.m.p.ptr().syscalltick++ 3004 // We need to cas the status and scan before resuming... 3005 casgstatus(_g_, _Gsyscall, _Grunning) 3006 3007 // Garbage collector isn't running (since we are), 3008 // so okay to clear syscallsp. 3009 _g_.syscallsp = 0 3010 _g_.m.locks-- 3011 if _g_.preempt { 3012 // restore the preemption request in case we've cleared it in newstack 3013 _g_.stackguard0 = stackPreempt 3014 } else { 3015 // otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock 3016 _g_.stackguard0 = _g_.stack.lo + _StackGuard 3017 } 3018 _g_.throwsplit = false 3019 return 3020 } 3021 3022 _g_.sysexitticks = 0 3023 if trace.enabled { 3024 // Wait till traceGoSysBlock event is emitted. 3025 // This ensures consistency of the trace (the goroutine is started after it is blocked). 3026 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick { 3027 osyield() 3028 } 3029 // We can't trace syscall exit right now because we don't have a P. 3030 // Tracing code can invoke write barriers that cannot run without a P. 3031 // So instead we remember the syscall exit time and emit the event 3032 // in execute when we have a P. 3033 _g_.sysexitticks = cputicks() 3034 } 3035 3036 _g_.m.locks-- 3037 3038 // Call the scheduler. 3039 mcall(exitsyscall0) 3040 3041 if _g_.m.mcache == nil { 3042 systemstack(func() { 3043 throw("lost mcache") 3044 }) 3045 } 3046 3047 // Scheduler returned, so we're allowed to run now. 3048 // Delete the syscallsp information that we left for 3049 // the garbage collector during the system call. 3050 // Must wait until now because until gosched returns 3051 // we don't know for sure that the garbage collector 3052 // is not running. 3053 _g_.syscallsp = 0 3054 _g_.m.p.ptr().syscalltick++ 3055 _g_.throwsplit = false 3056 } 3057 3058 //go:nosplit 3059 func exitsyscallfast() bool { 3060 _g_ := getg() 3061 3062 // Freezetheworld sets stopwait but does not retake P's. 3063 if sched.stopwait == freezeStopWait { 3064 _g_.m.mcache = nil 3065 _g_.m.p = 0 3066 return false 3067 } 3068 3069 // Try to re-acquire the last P. 3070 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) { 3071 // There's a cpu for us, so we can run. 3072 exitsyscallfast_reacquired() 3073 return true 3074 } 3075 3076 // Try to get any other idle P. 3077 oldp := _g_.m.p.ptr() 3078 _g_.m.mcache = nil 3079 _g_.m.p = 0 3080 if sched.pidle != 0 { 3081 var ok bool 3082 systemstack(func() { 3083 ok = exitsyscallfast_pidle() 3084 if ok && trace.enabled { 3085 if oldp != nil { 3086 // Wait till traceGoSysBlock event is emitted. 3087 // This ensures consistency of the trace (the goroutine is started after it is blocked). 
3088 for oldp.syscalltick == _g_.m.syscalltick { 3089 osyield() 3090 } 3091 } 3092 traceGoSysExit(0) 3093 } 3094 }) 3095 if ok { 3096 return true 3097 } 3098 } 3099 return false 3100 } 3101 3102 // exitsyscallfast_reacquired is the exitsyscall path on which this G 3103 // has successfully reacquired the P it was running on before the 3104 // syscall. 3105 // 3106 // This function is allowed to have write barriers because exitsyscall 3107 // has acquired a P at this point. 3108 // 3109 //go:yeswritebarrierrec 3110 //go:nosplit 3111 func exitsyscallfast_reacquired() { 3112 _g_ := getg() 3113 _g_.m.mcache = _g_.m.p.ptr().mcache 3114 _g_.m.p.ptr().m.set(_g_.m) 3115 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick { 3116 if trace.enabled { 3117 // The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed). 3118 // traceGoSysBlock for this syscall was already emitted, 3119 // but here we effectively retake the p from the new syscall running on the same p. 3120 systemstack(func() { 3121 // Denote blocking of the new syscall. 3122 traceGoSysBlock(_g_.m.p.ptr()) 3123 // Denote completion of the current syscall. 3124 traceGoSysExit(0) 3125 }) 3126 } 3127 _g_.m.p.ptr().syscalltick++ 3128 } 3129 } 3130 3131 func exitsyscallfast_pidle() bool { 3132 lock(&sched.lock) 3133 _p_ := pidleget() 3134 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 { 3135 atomic.Store(&sched.sysmonwait, 0) 3136 notewakeup(&sched.sysmonnote) 3137 } 3138 unlock(&sched.lock) 3139 if _p_ != nil { 3140 acquirep(_p_) 3141 return true 3142 } 3143 return false 3144 } 3145 3146 // exitsyscall slow path on g0. 3147 // Failed to acquire P, enqueue gp as runnable. 3148 // 3149 //go:nowritebarrierrec 3150 func exitsyscall0(gp *g) { 3151 _g_ := getg() 3152 3153 casgstatus(gp, _Gsyscall, _Grunnable) 3154 dropg() 3155 lock(&sched.lock) 3156 _p_ := pidleget() 3157 if _p_ == nil { 3158 globrunqput(gp) 3159 } else if atomic.Load(&sched.sysmonwait) != 0 { 3160 atomic.Store(&sched.sysmonwait, 0) 3161 notewakeup(&sched.sysmonnote) 3162 } 3163 unlock(&sched.lock) 3164 if _p_ != nil { 3165 acquirep(_p_) 3166 execute(gp, false) // Never returns. 3167 } 3168 if _g_.m.lockedg != 0 { 3169 // Wait until another thread schedules gp and so m again. 3170 stoplockedm() 3171 execute(gp, false) // Never returns. 3172 } 3173 stopm() 3174 schedule() // Never returns. 3175 } 3176 3177 func beforefork() { 3178 gp := getg().m.curg 3179 3180 // Block signals during a fork, so that the child does not run 3181 // a signal handler before exec if a signal is sent to the process 3182 // group. See issue #18600. 3183 gp.m.locks++ 3184 msigsave(gp.m) 3185 sigblock() 3186 3187 // This function is called before fork in syscall package. 3188 // Code between fork and exec must not allocate memory nor even try to grow stack. 3189 // Here we spoil g->_StackGuard to reliably detect any attempts to grow stack. 3190 // runtime_AfterFork will undo this in parent process, but not in child. 3191 gp.stackguard0 = stackFork 3192 } 3193 3194 // Called from syscall package before fork. 3195 //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork 3196 //go:nosplit 3197 func syscall_runtime_BeforeFork() { 3198 systemstack(beforefork) 3199 } 3200 3201 func afterfork() { 3202 gp := getg().m.curg 3203 3204 // See the comments in beforefork. 3205 gp.stackguard0 = gp.stack.lo + _StackGuard 3206 3207 msigrestore(gp.m.sigmask) 3208 3209 gp.m.locks-- 3210 } 3211 3212 // Called from syscall package after fork in parent. 
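// Concretely, these hooks fire whenever user code forks or execs through the
// syscall package: os/exec and os.StartProcess go through syscall.forkExec,
// which calls runtime_BeforeFork right before the raw fork, runtime_AfterFork
// in the parent and runtime_AfterForkInChild in the child (below), while
// syscall.Exec wraps execve with runtime_BeforeExec/AfterExec so that thread
// creation (newm1 holds execLock.rlock above) cannot race with exec. A typical
// trigger from user code (import os/exec):
//
//	out, err := exec.Command("date").Output() // fork + exec under the hood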
3213 //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork 3214 //go:nosplit 3215 func syscall_runtime_AfterFork() { 3216 systemstack(afterfork) 3217 } 3218 3219 // inForkedChild is true while manipulating signals in the child process. 3220 // This is used to avoid calling libc functions in case we are using vfork. 3221 var inForkedChild bool 3222 3223 // Called from syscall package after fork in child. 3224 // It resets non-sigignored signals to the default handler, and 3225 // restores the signal mask in preparation for the exec. 3226 // 3227 // Because this might be called during a vfork, and therefore may be 3228 // temporarily sharing address space with the parent process, this must 3229 // not change any global variables or calling into C code that may do so. 3230 // 3231 //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild 3232 //go:nosplit 3233 //go:nowritebarrierrec 3234 func syscall_runtime_AfterForkInChild() { 3235 // It's OK to change the global variable inForkedChild here 3236 // because we are going to change it back. There is no race here, 3237 // because if we are sharing address space with the parent process, 3238 // then the parent process can not be running concurrently. 3239 inForkedChild = true 3240 3241 clearSignalHandlers() 3242 3243 // When we are the child we are the only thread running, 3244 // so we know that nothing else has changed gp.m.sigmask. 3245 msigrestore(getg().m.sigmask) 3246 3247 inForkedChild = false 3248 } 3249 3250 // Called from syscall package before Exec. 3251 //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec 3252 func syscall_runtime_BeforeExec() { 3253 // Prevent thread creation during exec. 3254 execLock.lock() 3255 } 3256 3257 // Called from syscall package after Exec. 3258 //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec 3259 func syscall_runtime_AfterExec() { 3260 execLock.unlock() 3261 } 3262 3263 // Allocate a new g, with a stack big enough for stacksize bytes. 3264 func malg(stacksize int32) *g { 3265 newg := new(g) 3266 if stacksize >= 0 { 3267 stacksize = round2(_StackSystem + stacksize) 3268 systemstack(func() { 3269 newg.stack = stackalloc(uint32(stacksize)) 3270 }) 3271 newg.stackguard0 = newg.stack.lo + _StackGuard 3272 newg.stackguard1 = ^uintptr(0) 3273 } 3274 return newg 3275 } 3276 3277 // Create a new g running fn with siz bytes of arguments. 3278 // 创建一个g运行fn 3279 // Put it on the queue of g's waiting to run. 3280 // 将其放到g的等待运行队列 3281 // The compiler turns a go statement into a call to this. 3282 // 编译器将一个go语句转为这个函数的调用 3283 // Cannot split the stack because it assumes that the arguments 3284 // are available sequentially after &fn; they would not be 3285 // copied if a stack split occurred. 3286 // 不能split这个栈因为其假设参数是连续的在&fn之后 3287 // 第一个goroutine的fn指向mainPC, 其指向runtime.main, 且siz==0 3288 // 当用go语句创建goroutine时, 编译器会将其转为对此函数的调用, fn指向函数体的机器码 3289 // 调用这个函数时栈结构从下往上是 3290 // extra args pass to func 3291 // funcval 3292 // sizeof func + extra args(siz) 3293 //go:nosplit 3294 func newproc(siz int32, fn *funcval) { 3295 argp := add(unsafe.Pointer(&fn), sys.PtrSize) // 参数指针 3296 pc := getcallerpc() 3297 systemstack(func() { 3298 newproc1(fn, (*uint8)(argp), siz, pc) 3299 }) 3300 } 3301 3302 // 创建g 3303 // Create a new g running fn with narg bytes of arguments starting 3304 // at argp. callerpc is the address of the go statement that created 3305 // this. The new g is put on the queue of g's waiting to run. 
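// What the compiler emits for a go statement, to make the stack layout above
// concrete (sketch for amd64 with 8-byte words; funcvalFor is an illustrative
// name, the compiler materializes the *funcval itself). For a function
// add(a, b int),
//
//	go add(1, 2)
//
// becomes, roughly,
//
//	newproc(16, funcvalFor(add), 1, 2)
//
// that is: the total size of the arguments, the *funcval for add, and the
// arguments themselves laid out contiguously after fn on the caller's stack,
// which is why newproc reads them via add(unsafe.Pointer(&fn), sys.PtrSize)
// and why the call may not split the stack.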
3306 func newproc1(fn *funcval, argp *uint8, narg int32, callerpc uintptr) { 3307 _g_ := getg() 3308 3309 if fn == nil { 3310 _g_.m.throwing = -1 // do not dump full stacks 3311 throw("go of nil func value") 3312 } 3313 _g_.m.locks++ // disable preemption because it can be holding p in a local var 3314 siz := narg 3315 siz = (siz + 7) &^ 7 // 向上取8倍数 3316 3317 // We could allocate a larger initial stack if necessary. 3318 // Not worth it: this is almost always an error. 3319 // 4*sizeof(uintreg): extra space added below 3320 // sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall). 3321 if siz >= _StackMin-4*sys.RegSize-sys.RegSize { 3322 throw("newproc: function arguments too large for new goroutine") 3323 } 3324 3325 _p_ := _g_.m.p.ptr() // 可以看出新建的g还是关联建这个g的g的p 3326 newg := gfget(_p_) // 从p的free list中拿一个g 3327 if newg == nil { 3328 newg = malg(_StackMin) 3329 casgstatus(newg, _Gidle, _Gdead) 3330 allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 3331 } 3332 if newg.stack.hi == 0 { 3333 throw("newproc1: newg missing stack") 3334 } 3335 3336 if readgstatus(newg) != _Gdead { 3337 throw("newproc1: new g is not Gdead") 3338 } 3339 3340 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame 3341 totalSize += -totalSize & (sys.SpAlign - 1) // align to spAlign 3342 sp := newg.stack.hi - totalSize 3343 spArg := sp 3344 if usesLR { 3345 // caller's LR 3346 *(*uintptr)(unsafe.Pointer(sp)) = 0 3347 prepGoExitFrame(sp) 3348 spArg += sys.MinFrameSize 3349 } 3350 if narg > 0 { 3351 // 将参数拷贝到g的栈上 3352 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg)) 3353 // This is a stack-to-stack copy. If write barriers 3354 // are enabled and the source stack is grey (the 3355 // destination is always black), then perform a 3356 // barrier copy. We do this *after* the memmove 3357 // because the destination stack may have garbage on 3358 // it. 3359 if writeBarrier.needed && !_g_.m.curg.gcscandone { 3360 f := findfunc(fn.fn) 3361 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps)) 3362 // We're in the prologue, so it's always stack map index 0. 3363 bv := stackmapdata(stkmap, 0) 3364 bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata) 3365 } 3366 } 3367 3368 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 3369 newg.sched.sp = sp 3370 newg.stktopsp = sp 3371 newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 3372 newg.sched.g = guintptr(unsafe.Pointer(newg)) 3373 gostartcallfn(&newg.sched, fn) 3374 newg.gopc = callerpc 3375 newg.startpc = fn.fn 3376 if _g_.m.curg != nil { 3377 newg.labels = _g_.m.curg.labels 3378 } 3379 if isSystemGoroutine(newg) { 3380 atomic.Xadd(&sched.ngsys, +1) 3381 } 3382 newg.gcscanvalid = false 3383 casgstatus(newg, _Gdead, _Grunnable) 3384 3385 if _p_.goidcache == _p_.goidcacheend { 3386 // Sched.goidgen is the last allocated id, 3387 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 3388 // At startup sched.goidgen=0, so main goroutine receives goid=1. 
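// The goid cache below is a batched ID allocator: one atomic add reserves
// _GoidCacheBatch ids for this P, which are then handed out without further
// synchronization. The same pattern as a stand-alone sketch (idCache, nextID,
// batch are illustrative names; uses sync/atomic):
//
//	const batch = 16
//
//	var nextID uint64 // last id reserved by any cache
//
//	type idCache struct{ cur, end uint64 }
//
//	func (c *idCache) id() uint64 {
//		if c.cur == c.end {
//			hi := atomic.AddUint64(&nextID, batch) // reserve (hi-batch, hi]
//			c.cur, c.end = hi-batch+1, hi+1
//		}
//		id := c.cur
//		c.cur++
//		return id
//	}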
3389 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch) 3390 _p_.goidcache -= _GoidCacheBatch - 1 3391 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch 3392 } 3393 newg.goid = int64(_p_.goidcache) 3394 _p_.goidcache++ 3395 if raceenabled { 3396 newg.racectx = racegostart(callerpc) 3397 } 3398 if trace.enabled { 3399 traceGoCreate(newg, newg.startpc) 3400 } 3401 runqput(_p_, newg, true) 3402 3403 // 如果程序启动且有空闲的p, 且无自旋的m, 则有可能新建一个m, 这会保证有足够的m运行p 3404 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { 3405 wakep() 3406 } 3407 _g_.m.locks-- 3408 if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack 3409 _g_.stackguard0 = stackPreempt 3410 } 3411 } 3412 3413 // Put on gfree list. 3414 // If local list is too long, transfer a batch to the global list. 3415 func gfput(_p_ *p, gp *g) { 3416 if readgstatus(gp) != _Gdead { 3417 throw("gfput: bad status (not Gdead)") 3418 } 3419 3420 stksize := gp.stack.hi - gp.stack.lo 3421 3422 if stksize != _FixedStack { 3423 // non-standard stack size - free it. 3424 stackfree(gp.stack) 3425 gp.stack.lo = 0 3426 gp.stack.hi = 0 3427 gp.stackguard0 = 0 3428 } 3429 3430 gp.schedlink.set(_p_.gfree) 3431 _p_.gfree = gp 3432 _p_.gfreecnt++ 3433 if _p_.gfreecnt >= 64 { 3434 lock(&sched.gflock) 3435 for _p_.gfreecnt >= 32 { 3436 _p_.gfreecnt-- 3437 gp = _p_.gfree 3438 _p_.gfree = gp.schedlink.ptr() 3439 if gp.stack.lo == 0 { 3440 gp.schedlink.set(sched.gfreeNoStack) 3441 sched.gfreeNoStack = gp 3442 } else { 3443 gp.schedlink.set(sched.gfreeStack) 3444 sched.gfreeStack = gp 3445 } 3446 sched.ngfree++ 3447 } 3448 unlock(&sched.gflock) 3449 } 3450 } 3451 3452 // Get from gfree list. 3453 // If local list is empty, grab a batch from global list. 3454 // 从gfree列表中拿个g, 如果本地列表是空, 从全局列表中拿一个 3455 func gfget(_p_ *p) *g { 3456 retry: 3457 gp := _p_.gfree 3458 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) { 3459 lock(&sched.gflock) 3460 for _p_.gfreecnt < 32 { 3461 if sched.gfreeStack != nil { 3462 // Prefer Gs with stacks. 3463 gp = sched.gfreeStack 3464 sched.gfreeStack = gp.schedlink.ptr() 3465 } else if sched.gfreeNoStack != nil { 3466 gp = sched.gfreeNoStack 3467 sched.gfreeNoStack = gp.schedlink.ptr() 3468 } else { 3469 break 3470 } 3471 _p_.gfreecnt++ 3472 sched.ngfree-- 3473 gp.schedlink.set(_p_.gfree) 3474 _p_.gfree = gp 3475 } 3476 unlock(&sched.gflock) 3477 goto retry 3478 } 3479 if gp != nil { 3480 _p_.gfree = gp.schedlink.ptr() 3481 _p_.gfreecnt-- 3482 if gp.stack.lo == 0 { 3483 // Stack was deallocated in gfput. Allocate a new one. 3484 systemstack(func() { 3485 gp.stack = stackalloc(_FixedStack) 3486 }) 3487 gp.stackguard0 = gp.stack.lo + _StackGuard 3488 } else { 3489 if raceenabled { 3490 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3491 } 3492 if msanenabled { 3493 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo) 3494 } 3495 } 3496 } 3497 return gp 3498 } 3499 3500 // Purge all cached G's from gfree list to the global list. 3501 func gfpurge(_p_ *p) { 3502 lock(&sched.gflock) 3503 for _p_.gfreecnt != 0 { 3504 _p_.gfreecnt-- 3505 gp := _p_.gfree 3506 _p_.gfree = gp.schedlink.ptr() 3507 if gp.stack.lo == 0 { 3508 gp.schedlink.set(sched.gfreeNoStack) 3509 sched.gfreeNoStack = gp 3510 } else { 3511 gp.schedlink.set(sched.gfreeStack) 3512 sched.gfreeStack = gp 3513 } 3514 sched.ngfree++ 3515 } 3516 unlock(&sched.gflock) 3517 } 3518 3519 // Breakpoint executes a breakpoint trap. 
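// Typical use of LockOSThread above (and UnlockOSThread below) from user code:
// pin a goroutine to a single OS thread for the lifetime of a thread-affine C
// library, a GUI event loop, or other per-thread OS state
// (runThreadAffineLoop is an illustrative name):
//
//	go func() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// Everything here runs on exactly one OS thread. If the goroutine
//		// returned while still locked, goexit0 above would terminate the
//		// thread rather than return it to the pool.
//		runThreadAffineLoop()
//	}()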
3520 func Breakpoint() { 3521 breakpoint() 3522 } 3523 3524 // dolockOSThread is called by LockOSThread and lockOSThread below 3525 // after they modify m.locked. Do not allow preemption during this call, 3526 // or else the m might be different in this function than in the caller. 3527 //go:nosplit 3528 func dolockOSThread() { 3529 _g_ := getg() 3530 _g_.m.lockedg.set(_g_) 3531 _g_.lockedm.set(_g_.m) 3532 } 3533 3534 //go:nosplit 3535 3536 // LockOSThread wires the calling goroutine to its current operating system thread. 3537 // The calling goroutine will always execute in that thread, 3538 // and no other goroutine will execute in it, 3539 // until the calling goroutine has made as many calls to 3540 // UnlockOSThread as to LockOSThread. 3541 // If the calling goroutine exits without unlocking the thread, 3542 // the thread will be terminated. 3543 // 3544 // A goroutine should call LockOSThread before calling OS services or 3545 // non-Go library functions that depend on per-thread state. 3546 func LockOSThread() { 3547 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" { 3548 // If we need to start a new thread from the locked 3549 // thread, we need the template thread. Start it now 3550 // while we're in a known-good state. 3551 startTemplateThread() 3552 } 3553 _g_ := getg() 3554 _g_.m.lockedExt++ 3555 if _g_.m.lockedExt == 0 { 3556 _g_.m.lockedExt-- 3557 panic("LockOSThread nesting overflow") 3558 } 3559 dolockOSThread() 3560 } 3561 3562 //go:nosplit 3563 func lockOSThread() { 3564 getg().m.lockedInt++ 3565 dolockOSThread() 3566 } 3567 3568 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below 3569 // after they update m->locked. Do not allow preemption during this call, 3570 // or else the m might be in different in this function than in the caller. 3571 //go:nosplit 3572 func dounlockOSThread() { 3573 _g_ := getg() 3574 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { 3575 return 3576 } 3577 _g_.m.lockedg = 0 3578 _g_.lockedm = 0 3579 } 3580 3581 //go:nosplit 3582 3583 // UnlockOSThread undoes an earlier call to LockOSThread. 3584 // If this drops the number of active LockOSThread calls on the 3585 // calling goroutine to zero, it unwires the calling goroutine from 3586 // its fixed operating system thread. 3587 // If there are no active LockOSThread calls, this is a no-op. 3588 // 3589 // Before calling UnlockOSThread, the caller must ensure that the OS 3590 // thread is suitable for running other goroutines. If the caller made 3591 // any permanent changes to the state of the thread that would affect 3592 // other goroutines, it should not call this function and thus leave 3593 // the goroutine locked to the OS thread until the goroutine (and 3594 // hence the thread) exits. 3595 func UnlockOSThread() { 3596 _g_ := getg() 3597 if _g_.m.lockedExt == 0 { 3598 return 3599 } 3600 _g_.m.lockedExt-- 3601 dounlockOSThread() 3602 } 3603 3604 //go:nosplit 3605 func unlockOSThread() { 3606 _g_ := getg() 3607 if _g_.m.lockedInt == 0 { 3608 systemstack(badunlockosthread) 3609 } 3610 _g_.m.lockedInt-- 3611 dounlockOSThread() 3612 } 3613 3614 func badunlockosthread() { 3615 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread") 3616 } 3617 3618 func gcount() int32 { 3619 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys)) 3620 for _, _p_ := range allp { 3621 n -= _p_.gfreecnt 3622 } 3623 3624 // All these variables can be changed concurrently, so the result can be inconsistent. 
3625 // But at least the current goroutine is running. 3626 if n < 1 { 3627 n = 1 3628 } 3629 return n 3630 } 3631 3632 func mcount() int32 { 3633 return int32(sched.mnext - sched.nmfreed) 3634 } 3635 3636 var prof struct { 3637 signalLock uint32 3638 hz int32 3639 } 3640 3641 func _System() { _System() } 3642 func _ExternalCode() { _ExternalCode() } 3643 func _LostExternalCode() { _LostExternalCode() } 3644 func _GC() { _GC() } 3645 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } 3646 3647 // Counts SIGPROFs received while in atomic64 critical section, on mips{,le} 3648 var lostAtomic64Count uint64 3649 3650 // Called if we receive a SIGPROF signal. 3651 // Called by the signal handler, may run during STW. 3652 //go:nowritebarrierrec 3653 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) { 3654 if prof.hz == 0 { 3655 return 3656 } 3657 3658 // On mips{,le}, 64bit atomics are emulated with spinlocks, in 3659 // runtime/internal/atomic. If SIGPROF arrives while the program is inside 3660 // the critical section, it creates a deadlock (when writing the sample). 3661 // As a workaround, create a counter of SIGPROFs while in critical section 3662 // to store the count, and pass it to sigprof.add() later when SIGPROF is 3663 // received from somewhere else (with _LostSIGPROFDuringAtomic64 as pc). 3664 if GOARCH == "mips" || GOARCH == "mipsle" { 3665 if f := findfunc(pc); f.valid() { 3666 if hasprefix(funcname(f), "runtime/internal/atomic") { 3667 lostAtomic64Count++ 3668 return 3669 } 3670 } 3671 } 3672 3673 // Profiling runs concurrently with GC, so it must not allocate. 3674 // Set a trap in case the code does allocate. 3675 // Note that on windows, one thread takes profiles of all the 3676 // other threads, so mp is usually not getg().m. 3677 // In fact mp may not even be stopped. 3678 // See golang.org/issue/17165. 3679 getg().m.mallocing++ 3680 3681 // Define that a "user g" is a user-created goroutine, and a "system g" 3682 // is one that is m->g0 or m->gsignal. 3683 // 3684 // We might be interrupted for profiling halfway through a 3685 // goroutine switch. The switch involves updating three (or four) values: 3686 // g, PC, SP, and (on arm) LR. The PC must be the last to be updated, 3687 // because once it gets updated the new g is running. 3688 // 3689 // When switching from a user g to a system g, LR is not considered live, 3690 // so the update only affects g, SP, and PC. Since PC must be last, there 3691 // the possible partial transitions in ordinary execution are (1) g alone is updated, 3692 // (2) both g and SP are updated, and (3) SP alone is updated. 3693 // If SP or g alone is updated, we can detect the partial transition by checking 3694 // whether the SP is within g's stack bounds. (We could also require that SP 3695 // be changed only after g, but the stack bounds check is needed by other 3696 // cases, so there is no need to impose an additional requirement.) 3697 // 3698 // There is one exceptional transition to a system g, not in ordinary execution. 3699 // When a signal arrives, the operating system starts the signal handler running 3700 // with an updated PC and SP. The g is updated last, at the beginning of the 3701 // handler. There are two reasons this is okay. First, until g is updated the 3702 // g and SP do not match, so the stack bounds check detects the partial transition. 3703 // Second, signal handlers currently run with signals disabled, so a profiling 3704 // signal cannot arrive during the handler. 
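// Note: the "stack bounds check" referred to in this comment is, concretely, the
// test made further below before deciding whether to traceback: a sample is only
// walked when gp != nil, gp.stack.lo <= sp <= gp.stack.hi, and pc is not inside
// one of the setsSP functions.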
3705 // 3706 // When switching from a system g to a user g, there are three possibilities. 3707 // 3708 // First, it may be that the g switch has no PC update, because the SP 3709 // either corresponds to a user g throughout (as in asmcgocall) 3710 // or because it has been arranged to look like a user g frame 3711 // (as in cgocallback_gofunc). In this case, since the entire 3712 // transition is a g+SP update, a partial transition updating just one of 3713 // those will be detected by the stack bounds check. 3714 // 3715 // Second, when returning from a signal handler, the PC and SP updates 3716 // are performed by the operating system in an atomic update, so the g 3717 // update must be done before them. The stack bounds check detects 3718 // the partial transition here, and (again) signal handlers run with signals 3719 // disabled, so a profiling signal cannot arrive then anyway. 3720 // 3721 // Third, the common case: it may be that the switch updates g, SP, and PC 3722 // separately. If the PC is within any of the functions that does this, 3723 // we don't ask for a traceback. C.F. the function setsSP for more about this. 3724 // 3725 // There is another apparently viable approach, recorded here in case 3726 // the "PC within setsSP function" check turns out not to be usable. 3727 // It would be possible to delay the update of either g or SP until immediately 3728 // before the PC update instruction. Then, because of the stack bounds check, 3729 // the only problematic interrupt point is just before that PC update instruction, 3730 // and the sigprof handler can detect that instruction and simulate stepping past 3731 // it in order to reach a consistent state. On ARM, the update of g must be made 3732 // in two places (in R10 and also in a TLS slot), so the delayed update would 3733 // need to be the SP update. The sigprof handler must read the instruction at 3734 // the current PC and if it was the known instruction (for example, JMP BX or 3735 // MOV R2, PC), use that other register in place of the PC value. 3736 // The biggest drawback to this solution is that it requires that we can tell 3737 // whether it's safe to read from the memory pointed at by PC. 3738 // In a correct program, we can test PC == nil and otherwise read, 3739 // but if a profiling signal happens at the instant that a program executes 3740 // a bad jump (before the program manages to handle the resulting fault) 3741 // the profiling handler could fault trying to read nonexistent memory. 3742 // 3743 // To recap, there are no constraints on the assembly being used for the 3744 // transition. We simply require that g and SP match and that the PC is not 3745 // in gogo. 3746 traceback := true 3747 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) { 3748 traceback = false 3749 } 3750 var stk [maxCPUProfStack]uintptr 3751 n := 0 3752 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 { 3753 cgoOff := 0 3754 // Check cgoCallersUse to make sure that we are not 3755 // interrupting other code that is fiddling with 3756 // cgoCallers. We are running in a signal handler 3757 // with all signals blocked, so we don't have to worry 3758 // about any other code interrupting us. 
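// Note: the branch below builds stk in two parts. The leading entries are the C
// frames recorded in mp.cgoCallers by the cgo traceback hook; the Go frames that
// lead up to the cgo call are then appended starting at stk[cgoOff] via
// gentraceback on mp.curg.syscallpc/syscallsp.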
3759 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 { 3760 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 { 3761 cgoOff++ 3762 } 3763 copy(stk[:], mp.cgoCallers[:cgoOff]) 3764 mp.cgoCallers[0] = 0 3765 } 3766 3767 // Collect Go stack that leads to the cgo call. 3768 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0) 3769 } else if traceback { 3770 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack) 3771 } 3772 3773 if n <= 0 { 3774 // Normal traceback is impossible or has failed. 3775 // See if it falls into several common cases. 3776 n = 0 3777 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 { 3778 // Libcall, i.e. runtime syscall on windows. 3779 // Collect Go stack that leads to the call. 3780 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0) 3781 } 3782 if n == 0 { 3783 // If all of the above has failed, account it against abstract "System" or "GC". 3784 n = 2 3785 // "ExternalCode" is better than "etext". 3786 if pc > firstmoduledata.etext { 3787 pc = funcPC(_ExternalCode) + sys.PCQuantum 3788 } 3789 stk[0] = pc 3790 if mp.preemptoff != "" || mp.helpgc != 0 { 3791 stk[1] = funcPC(_GC) + sys.PCQuantum 3792 } else { 3793 stk[1] = funcPC(_System) + sys.PCQuantum 3794 } 3795 } 3796 } 3797 3798 if prof.hz != 0 { 3799 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 { 3800 cpuprof.addLostAtomic64(lostAtomic64Count) 3801 lostAtomic64Count = 0 3802 } 3803 cpuprof.add(gp, stk[:n]) 3804 } 3805 getg().m.mallocing-- 3806 } 3807 3808 // If the signal handler receives a SIGPROF signal on a non-Go thread, 3809 // it tries to collect a traceback into sigprofCallers. 3810 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback. 3811 var sigprofCallers cgoCallers 3812 var sigprofCallersUse uint32 3813 3814 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread, 3815 // and the signal handler collected a stack trace in sigprofCallers. 3816 // When this is called, sigprofCallersUse will be non-zero. 3817 // g is nil, and what we can do is very limited. 3818 //go:nosplit 3819 //go:nowritebarrierrec 3820 func sigprofNonGo() { 3821 if prof.hz != 0 { 3822 n := 0 3823 for n < len(sigprofCallers) && sigprofCallers[n] != 0 { 3824 n++ 3825 } 3826 cpuprof.addNonGo(sigprofCallers[:n]) 3827 } 3828 3829 atomic.Store(&sigprofCallersUse, 0) 3830 } 3831 3832 // sigprofNonGoPC is called when a profiling signal arrived on a 3833 // non-Go thread and we have a single PC value, not a stack trace. 3834 // g is nil, and what we can do is very limited. 3835 //go:nosplit 3836 //go:nowritebarrierrec 3837 func sigprofNonGoPC(pc uintptr) { 3838 if prof.hz != 0 { 3839 stk := []uintptr{ 3840 pc, 3841 funcPC(_ExternalCode) + sys.PCQuantum, 3842 } 3843 cpuprof.addNonGo(stk) 3844 } 3845 } 3846 3847 // Reports whether a function will set the SP 3848 // to an absolute value. Important that 3849 // we don't traceback when these are at the bottom 3850 // of the stack since we can't be sure that we will 3851 // find the caller. 3852 // 3853 // If the function is not on the bottom of the stack 3854 // we assume that it will have set it up so that traceback will be consistent, 3855 // either by being a traceback terminating function 3856 // or putting one on the stack at the right offset. 
3857 func setsSP(pc uintptr) bool { 3858 f := findfunc(pc) 3859 if !f.valid() { 3860 // couldn't find the function for this PC, 3861 // so assume the worst and stop traceback 3862 return true 3863 } 3864 switch f.funcID { 3865 case funcID_gogo, funcID_systemstack, funcID_mcall, funcID_morestack: 3866 return true 3867 } 3868 return false 3869 } 3870 3871 // setcpuprofilerate sets the CPU profiling rate to hz times per second. 3872 // If hz <= 0, setcpuprofilerate turns off CPU profiling. 3873 func setcpuprofilerate(hz int32) { 3874 // Force sane arguments. 3875 if hz < 0 { 3876 hz = 0 3877 } 3878 3879 // Disable preemption, otherwise we can be rescheduled to another thread 3880 // that has profiling enabled. 3881 _g_ := getg() 3882 _g_.m.locks++ 3883 3884 // Stop profiler on this thread so that it is safe to lock prof. 3885 // if a profiling signal came in while we had prof locked, 3886 // it would deadlock. 3887 setThreadCPUProfiler(0) 3888 3889 for !atomic.Cas(&prof.signalLock, 0, 1) { 3890 osyield() 3891 } 3892 if prof.hz != hz { 3893 setProcessCPUProfiler(hz) 3894 prof.hz = hz 3895 } 3896 atomic.Store(&prof.signalLock, 0) 3897 3898 lock(&sched.lock) 3899 sched.profilehz = hz 3900 unlock(&sched.lock) 3901 3902 if hz != 0 { 3903 setThreadCPUProfiler(hz) 3904 } 3905 3906 _g_.m.locks-- 3907 } 3908 3909 // Change number of processors. The world is stopped, sched is locked. 3910 // gcworkbufs are not being modified by either the GC or 3911 // the write barrier code. 3912 // Returns list of Ps with local work, they need to be scheduled by the caller. 3913 func procresize(nprocs int32) *p { 3914 old := gomaxprocs 3915 if old < 0 || nprocs <= 0 { 3916 throw("procresize: invalid arg") 3917 } 3918 if trace.enabled { 3919 traceGomaxprocs(nprocs) 3920 } 3921 3922 // update statistics 3923 now := nanotime() 3924 if sched.procresizetime != 0 { 3925 sched.totaltime += int64(old) * (now - sched.procresizetime) 3926 } 3927 sched.procresizetime = now 3928 3929 // Grow allp if necessary. 3930 if nprocs > int32(len(allp)) { 3931 // Synchronize with retake, which could be running 3932 // concurrently since it doesn't run on a P. 3933 lock(&allpLock) 3934 if nprocs <= int32(cap(allp)) { 3935 allp = allp[:nprocs] 3936 } else { 3937 nallp := make([]*p, nprocs) 3938 // Copy everything up to allp's cap so we 3939 // never lose old allocated Ps. 3940 copy(nallp, allp[:cap(allp)]) 3941 allp = nallp 3942 } 3943 unlock(&allpLock) 3944 } 3945 3946 // initialize new P's 3947 for i := int32(0); i < nprocs; i++ { 3948 pp := allp[i] 3949 if pp == nil { 3950 pp = new(p) 3951 pp.id = i 3952 pp.status = _Pgcstop 3953 pp.sudogcache = pp.sudogbuf[:0] 3954 for i := range pp.deferpool { 3955 pp.deferpool[i] = pp.deferpoolbuf[i][:0] 3956 } 3957 pp.wbBuf.reset() 3958 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp)) 3959 } 3960 if pp.mcache == nil { 3961 if old == 0 && i == 0 { 3962 if getg().m.mcache == nil { 3963 throw("missing mcache?") 3964 } 3965 pp.mcache = getg().m.mcache // bootstrap 3966 } else { 3967 pp.mcache = allocmcache() 3968 } 3969 } 3970 if raceenabled && pp.racectx == 0 { 3971 if old == 0 && i == 0 { 3972 pp.racectx = raceprocctx0 3973 raceprocctx0 = 0 // bootstrap 3974 } else { 3975 pp.racectx = raceproccreate() 3976 } 3977 } 3978 } 3979 3980 // free unused P's 3981 for i := nprocs; i < old; i++ { 3982 p := allp[i] 3983 if trace.enabled && p == getg().m.p.ptr() { 3984 // moving to p[0], pretend that we were descheduled 3985 // and then scheduled again to keep the trace sane. 
3986 traceGoSched() 3987 traceProcStop(p) 3988 } 3989 // move all runnable goroutines to the global queue 3990 for p.runqhead != p.runqtail { 3991 // pop from tail of local queue 3992 p.runqtail-- 3993 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr() 3994 // push onto head of global queue 3995 globrunqputhead(gp) 3996 } 3997 if p.runnext != 0 { 3998 globrunqputhead(p.runnext.ptr()) 3999 p.runnext = 0 4000 } 4001 // if there's a background worker, make it runnable and put 4002 // it on the global queue so it can clean itself up 4003 if gp := p.gcBgMarkWorker.ptr(); gp != nil { 4004 casgstatus(gp, _Gwaiting, _Grunnable) 4005 if trace.enabled { 4006 traceGoUnpark(gp, 0) 4007 } 4008 globrunqput(gp) 4009 // This assignment doesn't race because the 4010 // world is stopped. 4011 p.gcBgMarkWorker.set(nil) 4012 } 4013 // Flush p's write barrier buffer. 4014 if gcphase != _GCoff { 4015 wbBufFlush1(p) 4016 p.gcw.dispose() 4017 } 4018 for i := range p.sudogbuf { 4019 p.sudogbuf[i] = nil 4020 } 4021 p.sudogcache = p.sudogbuf[:0] 4022 for i := range p.deferpool { 4023 for j := range p.deferpoolbuf[i] { 4024 p.deferpoolbuf[i][j] = nil 4025 } 4026 p.deferpool[i] = p.deferpoolbuf[i][:0] 4027 } 4028 freemcache(p.mcache) 4029 p.mcache = nil 4030 gfpurge(p) 4031 traceProcFree(p) 4032 if raceenabled { 4033 raceprocdestroy(p.racectx) 4034 p.racectx = 0 4035 } 4036 p.gcAssistTime = 0 4037 p.status = _Pdead 4038 // can't free P itself because it can be referenced by an M in syscall 4039 } 4040 4041 // Trim allp. 4042 if int32(len(allp)) != nprocs { 4043 lock(&allpLock) 4044 allp = allp[:nprocs] 4045 unlock(&allpLock) 4046 } 4047 4048 _g_ := getg() 4049 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs { 4050 // continue to use the current P 4051 _g_.m.p.ptr().status = _Prunning 4052 } else { 4053 // release the current P and acquire allp[0] 4054 if _g_.m.p != 0 { 4055 _g_.m.p.ptr().m = 0 4056 } 4057 _g_.m.p = 0 4058 _g_.m.mcache = nil 4059 p := allp[0] 4060 p.m = 0 4061 p.status = _Pidle 4062 acquirep(p) 4063 if trace.enabled { 4064 traceGoStart() 4065 } 4066 } 4067 var runnablePs *p 4068 for i := nprocs - 1; i >= 0; i-- { 4069 p := allp[i] 4070 if _g_.m.p.ptr() == p { 4071 continue 4072 } 4073 p.status = _Pidle 4074 if runqempty(p) { 4075 pidleput(p) 4076 } else { 4077 p.m.set(mget()) 4078 p.link.set(runnablePs) 4079 runnablePs = p 4080 } 4081 } 4082 stealOrder.reset(uint32(nprocs)) 4083 var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32 4084 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs)) 4085 return runnablePs 4086 } 4087 4088 // Associate p and the current m. 4089 // 4090 // This function is allowed to have write barriers even if the caller 4091 // isn't because it immediately acquires _p_. 4092 // 4093 //go:yeswritebarrierrec 4094 func acquirep(_p_ *p) { 4095 // Do the part that isn't allowed to have write barriers. 4096 acquirep1(_p_) 4097 4098 // have p; write barriers now allowed 4099 _g_ := getg() 4100 _g_.m.mcache = _p_.mcache 4101 4102 if trace.enabled { 4103 traceProcStart() 4104 } 4105 } 4106 4107 // acquirep1 is the first step of acquirep, which actually acquires 4108 // _p_. This is broken out so we can disallow write barriers for this 4109 // part, since we don't yet have a P. 
4110 // 4111 //go:nowritebarrierrec 4112 func acquirep1(_p_ *p) { 4113 _g_ := getg() 4114 4115 if _g_.m.p != 0 || _g_.m.mcache != nil { 4116 throw("acquirep: already in go") 4117 } 4118 if _p_.m != 0 || _p_.status != _Pidle { 4119 id := int64(0) 4120 if _p_.m != 0 { 4121 id = _p_.m.ptr().id 4122 } 4123 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n") 4124 throw("acquirep: invalid p state") 4125 } 4126 _g_.m.p.set(_p_) 4127 _p_.m.set(_g_.m) 4128 _p_.status = _Prunning 4129 } 4130 4131 // Disassociate p and the current m. 4132 func releasep() *p { 4133 _g_ := getg() 4134 4135 if _g_.m.p == 0 || _g_.m.mcache == nil { 4136 throw("releasep: invalid arg") 4137 } 4138 _p_ := _g_.m.p.ptr() 4139 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning { 4140 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n") 4141 throw("releasep: invalid p state") 4142 } 4143 if trace.enabled { 4144 traceProcStop(_g_.m.p.ptr()) 4145 } 4146 _g_.m.p = 0 4147 _g_.m.mcache = nil 4148 _p_.m = 0 4149 _p_.status = _Pidle 4150 return _p_ 4151 } 4152 4153 func incidlelocked(v int32) { 4154 lock(&sched.lock) 4155 sched.nmidlelocked += v 4156 if v > 0 { 4157 checkdead() 4158 } 4159 unlock(&sched.lock) 4160 } 4161 4162 // Check for deadlock situation. 4163 // 检查是否有死锁,通过检查正在运行的M是否为0 4164 // The check is based on number of running M's, if 0 -> deadlock. 4165 // sched.lock must be held. 4166 func checkdead() { 4167 // For -buildmode=c-shared or -buildmode=c-archive it's OK if 4168 // there are no running goroutines. The calling program is 4169 // assumed to be running. 4170 if islibrary || isarchive { 4171 return 4172 } 4173 4174 // If we are dying because of a signal caught on an already idle thread, 4175 // freezetheworld will cause all running threads to block. 4176 // And runtime will essentially enter into deadlock state, 4177 // except that there is a thread that will call exit soon. 4178 if panicking > 0 { 4179 return 4180 } 4181 4182 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 4183 if run > 0 { 4184 return 4185 } 4186 if run < 0 { 4187 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 4188 throw("checkdead: inconsistent counts") 4189 } 4190 4191 grunning := 0 4192 lock(&allglock) 4193 for i := 0; i < len(allgs); i++ { 4194 gp := allgs[i] 4195 if isSystemGoroutine(gp) { 4196 continue 4197 } 4198 s := readgstatus(gp) 4199 switch s &^ _Gscan { 4200 case _Gwaiting: 4201 grunning++ 4202 case _Grunnable, 4203 _Grunning, 4204 _Gsyscall: 4205 unlock(&allglock) 4206 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n") 4207 throw("checkdead: runnable g") 4208 } 4209 } 4210 unlock(&allglock) 4211 if grunning == 0 { // possible if main goroutine calls runtime·Goexit() 4212 throw("no goroutines (main called runtime.Goexit) - deadlock!") 4213 } 4214 4215 // Maybe jump time forward for playground. 4216 gp := timejump() 4217 if gp != nil { 4218 casgstatus(gp, _Gwaiting, _Grunnable) 4219 globrunqput(gp) 4220 _p_ := pidleget() 4221 if _p_ == nil { 4222 throw("checkdead: no p for timer") 4223 } 4224 mp := mget() 4225 if mp == nil { 4226 // There should always be a free M since 4227 // nothing is running. 
4228 throw("checkdead: no m for timer") 4229 } 4230 mp.nextp.set(_p_) 4231 notewakeup(&mp.park) 4232 return 4233 } 4234 4235 getg().m.throwing = -1 // do not dump full stacks 4236 throw("all goroutines are asleep - deadlock!") 4237 } 4238 4239 // forcegcperiod is the maximum time in nanoseconds between garbage 4240 // collections. If we go this long without a garbage collection, one 4241 // is forced to run. 4242 // 4243 // This is a variable for testing purposes. It normally doesn't change. 4244 var forcegcperiod int64 = 2 * 60 * 1e9 // 2 minute 4245 4246 // Always runs without a P, so write barriers are not allowed. 4247 // 系统监控,运行在单独os thread(m) 上, 不用P运行 4248 // 4249 //go:nowritebarrierrec 4250 func sysmon() { 4251 lock(&sched.lock) 4252 sched.nmsys++ 4253 checkdead() 4254 unlock(&sched.lock) 4255 4256 // If a heap span goes unused for 5 minutes after a garbage collection, 4257 // we hand it back to the operating system. 4258 // 如果一块span在垃圾回收后五分钟没有被使用,把其还给操作系统(不是立即还) 4259 scavengelimit := int64(5 * 60 * 1e9) 4260 4261 if debug.scavenge > 0 { 4262 // Scavenge-a-lot for testing. 4263 forcegcperiod = 10 * 1e6 4264 scavengelimit = 20 * 1e6 // 20 ms 4265 } 4266 4267 lastscavenge := nanotime() 4268 nscavenge := 0 4269 4270 lasttrace := int64(0) 4271 idle := 0 // how many cycles in succession we had not wokeup somebody 4272 delay := uint32(0) 4273 for { 4274 if idle == 0 { // start with 20us sleep... 4275 delay = 20 // 一开始休眠20微秒 4276 } else if idle > 50 { // start doubling the sleep after 1ms... 4277 delay *= 2 4278 } 4279 // 最多休眠10毫秒 4280 if delay > 10*1000 { // up to 10ms 4281 delay = 10 * 1000 4282 } 4283 usleep(delay) 4284 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { 4285 lock(&sched.lock) 4286 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) { 4287 atomic.Store(&sched.sysmonwait, 1) 4288 unlock(&sched.lock) 4289 // Make wake-up period small enough 4290 // for the sampling to be correct. 4291 maxsleep := forcegcperiod / 2 4292 if scavengelimit < forcegcperiod { 4293 maxsleep = scavengelimit / 2 4294 } 4295 shouldRelax := true 4296 if osRelaxMinNS > 0 { 4297 next := timeSleepUntil() 4298 now := nanotime() 4299 if next-now < osRelaxMinNS { 4300 shouldRelax = false 4301 } 4302 } 4303 if shouldRelax { 4304 osRelax(true) 4305 } 4306 notetsleep(&sched.sysmonnote, maxsleep) 4307 if shouldRelax { 4308 osRelax(false) 4309 } 4310 lock(&sched.lock) 4311 atomic.Store(&sched.sysmonwait, 0) 4312 noteclear(&sched.sysmonnote) 4313 idle = 0 4314 delay = 20 4315 } 4316 unlock(&sched.lock) 4317 } 4318 // trigger libc interceptors if needed 4319 if *cgo_yield != nil { 4320 asmcgocall(*cgo_yield, nil) 4321 } 4322 // poll network if not polled for more than 10ms 4323 // 如果10ms没netpool了, 执行netpool, 定时拉事件, 让一些goroutine可运行, 否则有些goroutine注册了netpoll但永远没法恢复执行, m的schedule里也会时不时执行netpoll 4324 lastpoll := int64(atomic.Load64(&sched.lastpoll)) 4325 now := nanotime() 4326 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now { 4327 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now)) 4328 gp := netpoll(false) // non-blocking - returns list of goroutines 4329 if gp != nil { 4330 // Need to decrement number of idle locked M's 4331 // (pretending that one more is running) before injectglist. 
4332 // Otherwise it can lead to the following situation: 4333 // injectglist grabs all P's but before it starts M's to run the P's, 4334 // another M returns from syscall, finishes running its G, 4335 // observes that there is no work to do and no other running M's 4336 // and reports deadlock. 4337 incidlelocked(-1) 4338 injectglist(gp) 4339 incidlelocked(1) 4340 } 4341 } 4342 // retake P's blocked in syscalls 4343 // and preempt long running G's 4344 if retake(now) != 0 { 4345 idle = 0 4346 } else { 4347 idle++ 4348 } 4349 // check if we need to force a GC 4350 // 二分钟左右一强制gc,如果enablegc 4351 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 { 4352 lock(&forcegc.lock) 4353 forcegc.idle = 0 4354 forcegc.g.schedlink = 0 4355 injectglist(forcegc.g) 4356 unlock(&forcegc.lock) 4357 } 4358 // scavenge heap once in a while 4359 if lastscavenge+scavengelimit/2 < now { 4360 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit)) 4361 lastscavenge = now 4362 nscavenge++ 4363 } 4364 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now { 4365 lasttrace = now 4366 schedtrace(debug.scheddetail > 0) 4367 } 4368 } 4369 } 4370 4371 type sysmontick struct { 4372 schedtick uint32 4373 schedwhen int64 4374 syscalltick uint32 4375 syscallwhen int64 4376 } 4377 4378 // forcePreemptNS is the time slice given to a G before it is 4379 // preempted. 4380 const forcePreemptNS = 10 * 1000 * 1000 // 10ms 4381 4382 // 抢占调度 4383 func retake(now int64) uint32 { 4384 n := 0 4385 // Prevent allp slice changes. This lock will be completely 4386 // uncontended unless we're already stopping the world. 4387 lock(&allpLock) 4388 // We can't use a range loop over allp because we may 4389 // temporarily drop the allpLock. Hence, we need to re-fetch 4390 // allp each time around the loop. 4391 for i := 0; i < len(allp); i++ { 4392 _p_ := allp[i] 4393 if _p_ == nil { 4394 // This can happen if procresize has grown 4395 // allp but not yet created new Ps. 4396 continue 4397 } 4398 pd := &_p_.sysmontick 4399 s := _p_.status 4400 if s == _Psyscall { 4401 // Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us). 4402 t := int64(_p_.syscalltick) 4403 if int64(pd.syscalltick) != t { 4404 pd.syscalltick = uint32(t) 4405 pd.syscallwhen = now 4406 continue 4407 } 4408 // On the one hand we don't want to retake Ps if there is no other work to do, 4409 // but on the other hand we want to retake them eventually 4410 // because they can prevent the sysmon thread from deep sleep. 4411 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now { 4412 continue 4413 } 4414 // Drop allpLock so we can take sched.lock. 4415 unlock(&allpLock) 4416 // Need to decrement number of idle locked M's 4417 // (pretending that one more is running) before the CAS. 4418 // Otherwise the M from which we retake can exit the syscall, 4419 // increment nmidle and report deadlock. 4420 incidlelocked(-1) 4421 if atomic.Cas(&_p_.status, s, _Pidle) { 4422 if trace.enabled { 4423 traceGoSysBlock(_p_) 4424 traceProcStop(_p_) 4425 } 4426 n++ 4427 _p_.syscalltick++ 4428 handoffp(_p_) 4429 } 4430 incidlelocked(1) 4431 lock(&allpLock) 4432 } else if s == _Prunning { 4433 // Preempt G if it's running for too long. 
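// Note: pd (the P's sysmontick) caches the schedtick value seen on the previous
// sysmon pass. If schedtick has advanced, the P has switched goroutines since
// then, so only the timestamp is refreshed. If it has not advanced and schedwhen
// is more than forcePreemptNS (10ms) old, the same G has been running for a whole
// slice and preemptone asks it to yield.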
4434 // 尽力而为地抢占运行太久的G 4435 t := int64(_p_.schedtick) 4436 if int64(pd.schedtick) != t { 4437 pd.schedtick = uint32(t) 4438 pd.schedwhen = now 4439 continue 4440 } 4441 if pd.schedwhen+forcePreemptNS > now { 4442 continue 4443 } 4444 preemptone(_p_) 4445 } 4446 } 4447 unlock(&allpLock) 4448 return uint32(n) 4449 } 4450 4451 // Tell all goroutines that they have been preempted and they should stop. 4452 // This function is purely best-effort. It can fail to inform a goroutine if a 4453 // processor just started running it. 4454 // No locks need to be held. 4455 // Returns true if preemption request was issued to at least one goroutine. 4456 func preemptall() bool { 4457 res := false 4458 for _, _p_ := range allp { 4459 if _p_.status != _Prunning { 4460 continue 4461 } 4462 if preemptone(_p_) { 4463 res = true 4464 } 4465 } 4466 return res 4467 } 4468 4469 // Tell the goroutine running on processor P to stop. 4470 // This function is purely best-effort. It can incorrectly fail to inform the 4471 // goroutine. It can send inform the wrong goroutine. Even if it informs the 4472 // correct goroutine, that goroutine might ignore the request if it is 4473 // simultaneously executing newstack. 4474 // No lock needs to be held. 4475 // Returns true if preemption request was issued. 4476 // The actual preemption will happen at some point in the future 4477 // and will be indicated by the gp->status no longer being 4478 // Grunning 4479 // 抢占一个P上的goroutine, 就简单设置个标记, 尽力而为 4480 func preemptone(_p_ *p) bool { 4481 mp := _p_.m.ptr() 4482 if mp == nil || mp == getg().m { 4483 return false 4484 } 4485 gp := mp.curg 4486 if gp == nil || gp == mp.g0 { 4487 return false 4488 } 4489 4490 gp.preempt = true 4491 4492 // Every call in a go routine checks for stack overflow by 4493 // comparing the current stack pointer to gp->stackguard0. 4494 // Setting gp->stackguard0 to StackPreempt folds 4495 // preemption into the normal stack overflow check. 4496 gp.stackguard0 = stackPreempt 4497 return true 4498 } 4499 4500 var starttime int64 4501 4502 func schedtrace(detailed bool) { 4503 now := nanotime() 4504 if starttime == 0 { 4505 starttime = now 4506 } 4507 4508 lock(&sched.lock) 4509 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize) 4510 if detailed { 4511 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n") 4512 } 4513 // We must be careful while reading data from P's, M's and G's. 4514 // Even if we hold schedlock, most data can be changed concurrently. 4515 // E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil. 
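// Note: in non-detailed mode the output of this function is a single line, roughly
// of the form (values here are made up):
//   SCHED 2034ms: gomaxprocs=4 idleprocs=0 threads=8 spinningthreads=0 idlethreads=3 runqueue=1 [0 1 0 2]
// The loop below appends the bracketed part, one per-P local run queue length (t - h) per P.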
4516 for i, _p_ := range allp { 4517 mp := _p_.m.ptr() 4518 h := atomic.Load(&_p_.runqhead) 4519 t := atomic.Load(&_p_.runqtail) 4520 if detailed { 4521 id := int64(-1) 4522 if mp != nil { 4523 id = mp.id 4524 } 4525 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n") 4526 } else { 4527 // In non-detailed mode format lengths of per-P run queues as: 4528 // [len1 len2 len3 len4] 4529 print(" ") 4530 if i == 0 { 4531 print("[") 4532 } 4533 print(t - h) 4534 if i == len(allp)-1 { 4535 print("]\n") 4536 } 4537 } 4538 } 4539 4540 if !detailed { 4541 unlock(&sched.lock) 4542 return 4543 } 4544 4545 for mp := allm; mp != nil; mp = mp.alllink { 4546 _p_ := mp.p.ptr() 4547 gp := mp.curg 4548 lockedg := mp.lockedg.ptr() 4549 id1 := int32(-1) 4550 if _p_ != nil { 4551 id1 = _p_.id 4552 } 4553 id2 := int64(-1) 4554 if gp != nil { 4555 id2 = gp.goid 4556 } 4557 id3 := int64(-1) 4558 if lockedg != nil { 4559 id3 = lockedg.goid 4560 } 4561 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n") 4562 } 4563 4564 lock(&allglock) 4565 for gi := 0; gi < len(allgs); gi++ { 4566 gp := allgs[gi] 4567 mp := gp.m 4568 lockedm := gp.lockedm.ptr() 4569 id1 := int64(-1) 4570 if mp != nil { 4571 id1 = mp.id 4572 } 4573 id2 := int64(-1) 4574 if lockedm != nil { 4575 id2 = lockedm.id 4576 } 4577 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n") 4578 } 4579 unlock(&allglock) 4580 unlock(&sched.lock) 4581 } 4582 4583 // Put mp on midle list. 4584 // 把这个md放到idle的m列表 4585 // Sched must be locked. 4586 // May run during STW, so write barriers are not allowed. 4587 //go:nowritebarrierrec 4588 func mput(mp *m) { 4589 mp.schedlink = sched.midle 4590 sched.midle.set(mp) 4591 sched.nmidle++ 4592 checkdead() 4593 } 4594 4595 // Try to get an m from midle list. 4596 // Sched must be locked. 4597 // May run during STW, so write barriers are not allowed. 4598 //go:nowritebarrierrec 4599 func mget() *m { 4600 mp := sched.midle.ptr() 4601 if mp != nil { 4602 sched.midle = mp.schedlink 4603 sched.nmidle-- 4604 } 4605 return mp 4606 } 4607 4608 // Put gp on the global runnable queue. 4609 // Sched must be locked. 4610 // May run during STW, so write barriers are not allowed. 4611 //go:nowritebarrierrec 4612 func globrunqput(gp *g) { 4613 gp.schedlink = 0 4614 if sched.runqtail != 0 { 4615 sched.runqtail.ptr().schedlink.set(gp) 4616 } else { 4617 sched.runqhead.set(gp) 4618 } 4619 sched.runqtail.set(gp) 4620 sched.runqsize++ 4621 } 4622 4623 // Put gp at the head of the global runnable queue. 4624 // Sched must be locked. 4625 // May run during STW, so write barriers are not allowed. 4626 //go:nowritebarrierrec 4627 func globrunqputhead(gp *g) { 4628 gp.schedlink = sched.runqhead 4629 sched.runqhead.set(gp) 4630 if sched.runqtail == 0 { 4631 sched.runqtail.set(gp) 4632 } 4633 sched.runqsize++ 4634 } 4635 4636 // Put a batch of runnable goroutines on the global runnable queue. 4637 // Sched must be locked. 
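// Note: the global run queue is an intrusive singly linked list threaded through
// g.schedlink, with sched.runqhead/runqtail as its endpoints, so appending an
// already linked chain of n goroutines below is O(1); the caller must already
// hold sched.lock.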
4638 func globrunqputbatch(ghead *g, gtail *g, n int32) { 4639 gtail.schedlink = 0 4640 if sched.runqtail != 0 { 4641 sched.runqtail.ptr().schedlink.set(ghead) 4642 } else { 4643 sched.runqhead.set(ghead) 4644 } 4645 sched.runqtail.set(gtail) 4646 sched.runqsize += n 4647 } 4648 4649 // Try get a batch of G's from the global runnable queue. 4650 // Sched must be locked. 4651 func globrunqget(_p_ *p, max int32) *g { 4652 if sched.runqsize == 0 { 4653 return nil 4654 } 4655 4656 n := sched.runqsize/gomaxprocs + 1 4657 if n > sched.runqsize { 4658 n = sched.runqsize 4659 } 4660 if max > 0 && n > max { 4661 n = max 4662 } 4663 if n > int32(len(_p_.runq))/2 { 4664 n = int32(len(_p_.runq)) / 2 4665 } 4666 4667 sched.runqsize -= n 4668 if sched.runqsize == 0 { 4669 sched.runqtail = 0 4670 } 4671 4672 gp := sched.runqhead.ptr() 4673 sched.runqhead = gp.schedlink 4674 n-- 4675 for ; n > 0; n-- { 4676 gp1 := sched.runqhead.ptr() 4677 sched.runqhead = gp1.schedlink 4678 runqput(_p_, gp1, false) 4679 } 4680 return gp 4681 } 4682 4683 // Put p to on _Pidle list. 4684 // Sched must be locked. 4685 // May run during STW, so write barriers are not allowed. 4686 //go:nowritebarrierrec 4687 func pidleput(_p_ *p) { 4688 if !runqempty(_p_) { 4689 throw("pidleput: P has non-empty run queue") 4690 } 4691 _p_.link = sched.pidle 4692 sched.pidle.set(_p_) 4693 atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic 4694 } 4695 4696 // Try get a p from _Pidle list. 4697 // Sched must be locked. 4698 // May run during STW, so write barriers are not allowed. 4699 //go:nowritebarrierrec 4700 func pidleget() *p { 4701 _p_ := sched.pidle.ptr() 4702 if _p_ != nil { 4703 sched.pidle = _p_.link 4704 atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic 4705 } 4706 return _p_ 4707 } 4708 4709 // runqempty returns true if _p_ has no Gs on its local run queue. 4710 // It never returns true spuriously. 4711 func runqempty(_p_ *p) bool { 4712 // Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail, 4713 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext. 4714 // Simply observing that runqhead == runqtail and then observing that runqnext == nil 4715 // does not mean the queue is empty. 4716 for { 4717 head := atomic.Load(&_p_.runqhead) 4718 tail := atomic.Load(&_p_.runqtail) 4719 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext))) 4720 if tail == atomic.Load(&_p_.runqtail) { 4721 return head == tail && runnext == 0 4722 } 4723 } 4724 } 4725 4726 // To shake out latent assumptions about scheduling order, 4727 // we introduce some randomness into scheduling decisions 4728 // when running with the race detector. 4729 // The need for this was made obvious by changing the 4730 // (deterministic) scheduling order in Go 1.5 and breaking 4731 // many poorly-written tests. 4732 // With the randomness here, as long as the tests pass 4733 // consistently with -race, they shouldn't have latent scheduling 4734 // assumptions. 4735 const randomizeScheduler = raceenabled 4736 4737 // runqput tries to put g on the local runnable queue. 4738 // If next if false, runqput adds g to the tail of the runnable queue. 4739 // If next is true, runqput puts g in the _p_.runnext slot. 4740 // If the run queue is full, runnext puts g on the global queue. 4741 // Executed only by the owner P. 
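// Note: _p_.runq is a fixed-size ring buffer with a single producer (the owner P,
// which alone writes runqtail) and multiple consumers (stealing Ps, which advance
// runqhead with CAS). That is why the put below only needs an atomic store on
// runqtail, while gets and steals need a CAS on runqhead.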
4742 // runqput 把g放到本地运行队列(p的运行队列) 4743 // 如果next为false, 会将g放到运行队列的尾部 4744 // 如果next为true, 会把其放到runnext, 原先的runnext的会放到队列中 4745 // 如果运行队列满了, 将g和p中的一批放到全局队列 4746 func runqput(_p_ *p, gp *g, next bool) { 4747 if randomizeScheduler && next && fastrand()%2 == 0 { 4748 next = false 4749 } 4750 4751 if next { 4752 retryNext: 4753 oldnext := _p_.runnext 4754 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 4755 goto retryNext 4756 } 4757 if oldnext == 0 { 4758 return 4759 } 4760 // Kick the old runnext out to the regular run queue. 4761 gp = oldnext.ptr() 4762 } 4763 4764 retry: 4765 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers, consumer应该指m 4766 t := _p_.runqtail 4767 if t-h < uint32(len(_p_.runq)) { 4768 _p_.runq[t%uint32(len(_p_.runq))].set(gp) 4769 atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption 4770 return 4771 } 4772 if runqputslow(_p_, gp, h, t) { // true表示gp已经加到全局队列 4773 return 4774 } 4775 // the queue is not full, now the put above must succeed 4776 goto retry 4777 } 4778 4779 // Put g and a batch of work from local runnable queue on global queue. 4780 // Executed only by the owner P. 4781 func runqputslow(_p_ *p, gp *g, h, t uint32) bool { 4782 var batch [len(_p_.runq)/2 + 1]*g 4783 4784 // First, grab a batch from local queue. 4785 n := t - h 4786 n = n / 2 4787 if n != uint32(len(_p_.runq)/2) { 4788 throw("runqputslow: queue is not full") 4789 } 4790 for i := uint32(0); i < n; i++ { 4791 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr() 4792 } 4793 if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4794 // 有其他地方在更新runqhead, 则啥都不做 4795 return false 4796 } 4797 batch[n] = gp 4798 4799 if randomizeScheduler { 4800 for i := uint32(1); i <= n; i++ { 4801 j := fastrandn(i + 1) 4802 batch[i], batch[j] = batch[j], batch[i] 4803 } 4804 } 4805 4806 // Link the goroutines. 4807 for i := uint32(0); i < n; i++ { 4808 batch[i].schedlink.set(batch[i+1]) 4809 } 4810 4811 // Now put the batch on global queue. 4812 lock(&sched.lock) 4813 globrunqputbatch(batch[0], batch[n], int32(n+1)) 4814 unlock(&sched.lock) 4815 return true 4816 } 4817 4818 // Get g from local runnable queue. 4819 // If inheritTime is true, gp should inherit the remaining time in the 4820 // current time slice. Otherwise, it should start a new time slice. 4821 // Executed only by the owner P. 4822 func runqget(_p_ *p) (gp *g, inheritTime bool) { 4823 // If there's a runnext, it's the next G to run. 4824 for { 4825 next := _p_.runnext 4826 if next == 0 { 4827 break 4828 } 4829 if _p_.runnext.cas(next, 0) { 4830 return next.ptr(), true 4831 } 4832 } 4833 4834 for { 4835 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4836 t := _p_.runqtail 4837 if t == h { 4838 return nil, false 4839 } 4840 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr() 4841 if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume 4842 return gp, false 4843 } 4844 } 4845 } 4846 4847 // Grabs a batch of goroutines from _p_'s runnable queue into batch. 4848 // Batch is a ring buffer starting at batchHead. 4849 // Returns number of grabbed goroutines. 4850 // Can be executed by any P. 
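// Note: inside runqgrab, n = (t-h) - (t-h)/2, i.e. half of the victim's queue
// rounded up. For example, with 5 runnable Gs the thief grabs 3 and leaves 2;
// with 1 runnable G it grabs that 1.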
4851 // 从p的runq里取一半g放到batch 4852 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 4853 for { 4854 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers 4855 t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer 4856 n := t - h 4857 n = n - n/2 4858 if n == 0 { 4859 if stealRunNextG { 4860 // Try to steal from _p_.runnext. 4861 if next := _p_.runnext; next != 0 { 4862 if _p_.status == _Prunning { 4863 // Sleep to ensure that _p_ isn't about to run the g 4864 // we are about to steal. 4865 // The important use case here is when the g running 4866 // on _p_ ready()s another g and then almost 4867 // immediately blocks. Instead of stealing runnext 4868 // in this window, back off to give _p_ a chance to 4869 // schedule runnext. This will avoid thrashing gs 4870 // between different Ps. 4871 // A sync chan send/recv takes ~50ns as of time of 4872 // writing, so 3us gives ~50x overshoot. 4873 if GOOS != "windows" { 4874 usleep(3) 4875 } else { 4876 // On windows system timer granularity is 4877 // 1-15ms, which is way too much for this 4878 // optimization. So just yield. 4879 osyield() 4880 } 4881 } 4882 if !_p_.runnext.cas(next, 0) { 4883 continue 4884 } 4885 batch[batchHead%uint32(len(batch))] = next 4886 return 1 4887 } 4888 } 4889 return 0 4890 } 4891 if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t 4892 continue 4893 } 4894 for i := uint32(0); i < n; i++ { 4895 g := _p_.runq[(h+i)%uint32(len(_p_.runq))] 4896 batch[(batchHead+i)%uint32(len(batch))] = g 4897 } 4898 if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume 4899 return n 4900 } 4901 } 4902 } 4903 4904 // Steal half of elements from local runnable queue of p2 4905 // and put onto local runnable queue of p. 4906 // Returns one of the stolen elements (or nil if failed). 
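// Note: the n goroutines grabbed from p2 are written into _p_.runq at slots
// t .. t+n-1. The last one (slot t+n-1) is returned so the caller can run it
// immediately; only the remaining n-1 are published by advancing _p_.runqtail,
// which is why n is decremented before the store.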
4907 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { 4908 t := _p_.runqtail 4909 n := runqgrab(p2, &_p_.runq, t, stealRunNextG) 4910 if n == 0 { 4911 return nil 4912 } 4913 n-- 4914 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr() 4915 if n == 0 { 4916 return gp 4917 } 4918 h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers 4919 if t-h+n >= uint32(len(_p_.runq)) { 4920 throw("runqsteal: runq overflow") 4921 } 4922 atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption 4923 return gp 4924 } 4925 4926 //go:linkname setMaxThreads runtime/debug.setMaxThreads 4927 func setMaxThreads(in int) (out int) { 4928 lock(&sched.lock) 4929 out = int(sched.maxmcount) 4930 if in > 0x7fffffff { // MaxInt32 4931 sched.maxmcount = 0x7fffffff 4932 } else { 4933 sched.maxmcount = int32(in) 4934 } 4935 checkmcount() 4936 unlock(&sched.lock) 4937 return 4938 } 4939 4940 func haveexperiment(name string) bool { 4941 if name == "framepointer" { 4942 return framepointer_enabled // set by linker 4943 } 4944 x := sys.Goexperiment 4945 for x != "" { 4946 xname := "" 4947 i := index(x, ",") 4948 if i < 0 { 4949 xname, x = x, "" 4950 } else { 4951 xname, x = x[:i], x[i+1:] 4952 } 4953 if xname == name { 4954 return true 4955 } 4956 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name { 4957 return false 4958 } 4959 } 4960 return false 4961 } 4962 4963 //go:nosplit 4964 func procPin() int { 4965 _g_ := getg() 4966 mp := _g_.m 4967 4968 mp.locks++ 4969 return int(mp.p.ptr().id) 4970 } 4971 4972 //go:nosplit 4973 func procUnpin() { 4974 _g_ := getg() 4975 _g_.m.locks-- 4976 } 4977 4978 //go:linkname sync_runtime_procPin sync.runtime_procPin 4979 //go:nosplit 4980 func sync_runtime_procPin() int { 4981 return procPin() 4982 } 4983 4984 //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 4985 //go:nosplit 4986 func sync_runtime_procUnpin() { 4987 procUnpin() 4988 } 4989 4990 //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin 4991 //go:nosplit 4992 func sync_atomic_runtime_procPin() int { 4993 return procPin() 4994 } 4995 4996 //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin 4997 //go:nosplit 4998 func sync_atomic_runtime_procUnpin() { 4999 procUnpin() 5000 } 5001 5002 // Active spinning for sync.Mutex. 5003 //go:linkname sync_runtime_canSpin sync.runtime_canSpin 5004 //go:nosplit 5005 func sync_runtime_canSpin(i int) bool { 5006 // sync.Mutex is cooperative, so we are conservative with spinning. 5007 // Spin only few times and only if running on a multicore machine and 5008 // GOMAXPROCS>1 and there is at least one other running P and local runq is empty. 5009 // As opposed to runtime mutex we don't do passive spinning here, 5010 // because there can be work on global runq on on other Ps. 5011 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 { 5012 return false 5013 } 5014 if p := getg().m.p.ptr(); !runqempty(p) { 5015 return false 5016 } 5017 return true 5018 } 5019 5020 //go:linkname sync_runtime_doSpin sync.runtime_doSpin 5021 //go:nosplit 5022 func sync_runtime_doSpin() { 5023 procyield(active_spin_cnt) 5024 } 5025 5026 var stealOrder randomOrder 5027 5028 // randomOrder/randomEnum are helper types for randomized work stealing. 5029 // They allow to enumerate all Ps in different pseudo-random orders without repetitions. 
5030 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS 5031 // are coprime, then a sequences of (i + X) % GOMAXPROCS gives the required enumeration. 5032 type randomOrder struct { 5033 count uint32 5034 coprimes []uint32 5035 } 5036 5037 type randomEnum struct { 5038 i uint32 5039 count uint32 5040 pos uint32 5041 inc uint32 5042 } 5043 5044 func (ord *randomOrder) reset(count uint32) { 5045 ord.count = count 5046 ord.coprimes = ord.coprimes[:0] 5047 for i := uint32(1); i <= count; i++ { 5048 if gcd(i, count) == 1 { 5049 ord.coprimes = append(ord.coprimes, i) 5050 } 5051 } 5052 } 5053 5054 func (ord *randomOrder) start(i uint32) randomEnum { 5055 return randomEnum{ 5056 count: ord.count, 5057 pos: i % ord.count, 5058 inc: ord.coprimes[i%uint32(len(ord.coprimes))], 5059 } 5060 } 5061 5062 func (enum *randomEnum) done() bool { 5063 return enum.i == enum.count 5064 } 5065 5066 func (enum *randomEnum) next() { 5067 enum.i++ 5068 enum.pos = (enum.pos + enum.inc) % enum.count 5069 } 5070 5071 func (enum *randomEnum) position() uint32 { 5072 return enum.pos 5073 } 5074 5075 func gcd(a, b uint32) uint32 { 5076 for b != 0 { 5077 a, b = b, a%b 5078 } 5079 return a 5080 }
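
// Note: the coprime trick above can be checked with a small standalone program.
// This is an illustrative sketch at user level, not part of proc.go or the runtime
// API; it only mirrors the idea behind randomOrder/randomEnum: for any inc that is
// coprime with count, the sequence (pos + inc) % count visits every value in
// [0, count) exactly once.

package main

import "fmt"

// gcd mirrors the helper at the bottom of proc.go.
func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count = 4 // pretend GOMAXPROCS == 4

	// Collect every inc in [1, count] that is coprime with count,
	// the same way randomOrder.reset does.
	var coprimes []uint32
	for i := uint32(1); i <= count; i++ {
		if gcd(i, count) == 1 {
			coprimes = append(coprimes, i)
		}
	}
	fmt.Println("coprimes of", count, ":", coprimes) // [1 3]

	// Enumerate all P indices once per coprime step, as randomEnum does.
	for _, inc := range coprimes {
		order := make([]uint32, 0, count)
		pos := uint32(2) % count // arbitrary starting position
		for i := uint32(0); i < count; i++ {
			order = append(order, pos)
			pos = (pos + inc) % count
		}
		fmt.Println("inc", inc, "order", order) // each index appears exactly once
	}
}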