github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/runtime/proc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  var buildVersion = sys.TheVersion
    14  
    15  // Goroutine scheduler
    16  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    17  //
    18  // The main concepts are:
    19  // G - goroutine.
    20  // M - worker thread, or machine.
    21  // P - processor, a resource that is required to execute Go code.
    22  //     M must have an associated P to execute Go code; however, it can be
    23  //     blocked or in a syscall w/o an associated P.
    24  //
    25  // Design doc at https://golang.org/s/go11sched.
    26  
    27  // Worker thread parking/unparking.
    28  // We need to balance between keeping enough running worker threads to utilize
    29  // available hardware parallelism and parking excessive running worker threads
    30  // to conserve CPU resources and power. This is not simple for two reasons:
    31  // (1) scheduler state is intentionally distributed (in particular, per-P work
    32  // queues), so it is not possible to compute global predicates on fast paths;
    33  // (2) for optimal thread management we would need to know the future (don't park
    34  // a worker thread when a new goroutine will be readied in the near future).
    35  //
    36  // Three rejected approaches that would work badly:
    37  // 1. Centralize all scheduler state (would inhibit scalability).
    38  // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
    39  //    is a spare P, unpark a thread and hand it the P and the goroutine.
    40  //    This would lead to thread state thrashing, as the thread that readied the
    41  //    goroutine can be out of work the very next moment, and we would need to park it.
    42  //    Also, it would destroy locality of computation as we want to preserve
    43  //    dependent goroutines on the same thread; and introduce additional latency.
    44  // 3. Unpark an additional thread whenever we ready a goroutine and there is an
    45  //    idle P, but don't do handoff. This would lead to excessive thread parking/
    46  //    unparking as the additional threads will instantly park without discovering
    47  //    any work to do.
    48  //
    49  // The current approach:
    50  // We unpark an additional thread when we ready a goroutine if (1) there is an
    51  // idle P and (2) there are no "spinning" worker threads. A worker thread is considered
    52  // spinning if it is out of local work and did not find work in global run queue/
    53  // netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
    54  // Threads unparked this way are also considered spinning; we don't do goroutine
    55  // handoff so such threads are out of work initially. Spinning threads do some
    56  // spinning looking for work in per-P run queues before parking. If a spinning
    57  // thread finds work it takes itself out of the spinning state and proceeds to
    58  // execution. If it does not find work it takes itself out of the spinning state
    59  // and then parks.
    60  // If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
    61  // new threads when readying goroutines. To compensate for that, if the last spinning
    62  // thread finds work and stops spinning, it must unpark a new spinning thread.
    63  // This approach smooths out unjustified spikes of thread unparking,
    64  // but at the same time guarantees eventual maximal CPU parallelism utilization.
    65  //
    66  // The main implementation complication is that we need to be very careful during
    67  // spinning->non-spinning thread transition. This transition can race with submission
    68  // of a new goroutine, and either one part or another needs to unpark another worker
    69  // thread. If they both fail to do that, we can end up with semi-persistent CPU
    70  // underutilization. The general pattern for goroutine readying is: submit a goroutine
    71  // to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
    72  // The general pattern for spinning->non-spinning transition is: decrement nmspinning,
    73  // #StoreLoad-style memory barrier, check all per-P work queues for new work.
    74  // Note that all this complexity does not apply to global run queue as we are not
    75  // sloppy about thread unparking when submitting to global queue. Also see comments
    76  // for nmspinning manipulation.
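        //
        // An illustrative sketch of the two racing paths described above (this is
        // not literal runtime code; ready below contains the real readying path,
        // and checkAllRunqueues is a hypothetical stand-in for the spinner's recheck):
        //
        //	// Readying a goroutine:
        //	runqput(pp, gp, next)                 // submit to the local run queue
        //	// #StoreLoad-style barrier
        //	if sched.npidle != 0 && sched.nmspinning == 0 {
        //		wakep()                       // unpark a spinning worker
        //	}
        //
        //	// Spinning -> non-spinning transition:
        //	atomic.Xadd(&sched.nmspinning, -1)
        //	// #StoreLoad-style barrier
        //	if gp := checkAllRunqueues(); gp != nil { // hypothetical recheck of every per-P queue
        //		wakep()                           // the last spinner must re-unpark a worker
        //	}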
    77  
    78  var (
    79  	m0           m
    80  	g0           g
    81  	raceprocctx0 uintptr
    82  )
    83  
    84  //go:linkname runtime_init runtime.init
    85  func runtime_init()
    86  
    87  //go:linkname main_init main.init
    88  func main_init()
    89  
    90  // main_init_done is a signal used by cgocallbackg that initialization
    91  // has been completed. It is made before _cgo_notify_runtime_init_done,
    92  // so all cgo calls can rely on it existing. When main_init is complete,
    93  // it is closed, meaning cgocallbackg can reliably receive from it.
    94  var main_init_done chan bool
    95  
    96  //go:linkname main_main main.main
    97  func main_main()
    98  
    99  // runtimeInitTime is the nanotime() at which the runtime started.
   100  var runtimeInitTime int64
   101  
   102  // Value to use for signal mask for newly created M's.
   103  var initSigmask sigset
   104  
   105  // The main goroutine.
   106  func main() {
   107  	g := getg()
   108  
   109  	// Racectx of m0->g0 is used only as the parent of the main goroutine.
   110  	// It must not be used for anything else.
   111  	g.m.g0.racectx = 0
   112  
   113  	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
   114  	// Using decimal instead of binary GB and MB because
   115  	// they look nicer in the stack overflow failure message.
   116  	if sys.PtrSize == 8 {
   117  		maxstacksize = 1000000000
   118  	} else {
   119  		maxstacksize = 250000000
   120  	}
   121  
   122  	// Record when the world started.
   123  	runtimeInitTime = nanotime()
   124  
   125  	systemstack(func() {
   126  		newm(sysmon, nil)
   127  	})
   128  
   129  	// Lock the main goroutine onto this, the main OS thread,
   130  	// during initialization. Most programs won't care, but a few
   131  	// do require certain calls to be made by the main thread.
   132  	// Those can arrange for main.main to run in the main thread
   133  	// by calling runtime.LockOSThread during initialization
   134  	// to preserve the lock.
   135  	lockOSThread()
   136  
   137  	if g.m != &m0 {
   138  		throw("runtime.main not on m0")
   139  	}
   140  
   141  	runtime_init() // must be before defer
   142  
   143  	// Defer unlock so that runtime.Goexit during init does the unlock too.
   144  	needUnlock := true
   145  	defer func() {
   146  		if needUnlock {
   147  			unlockOSThread()
   148  		}
   149  	}()
   150  
   151  	gcenable()
   152  
   153  	main_init_done = make(chan bool)
   154  	if iscgo {
   155  		if _cgo_thread_start == nil {
   156  			throw("_cgo_thread_start missing")
   157  		}
   158  		if GOOS != "windows" {
   159  			if _cgo_setenv == nil {
   160  				throw("_cgo_setenv missing")
   161  			}
   162  			if _cgo_unsetenv == nil {
   163  				throw("_cgo_unsetenv missing")
   164  			}
   165  		}
   166  		if _cgo_notify_runtime_init_done == nil {
   167  			throw("_cgo_notify_runtime_init_done missing")
   168  		}
   169  		cgocall(_cgo_notify_runtime_init_done, nil)
   170  	}
   171  
   172  	fn := main_init // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
   173  	fn()
   174  	close(main_init_done)
   175  
   176  	needUnlock = false
   177  	unlockOSThread()
   178  
   179  	if isarchive || islibrary {
   180  		// A program compiled with -buildmode=c-archive or c-shared
   181  		// has a main, but it is not executed.
   182  		return
   183  	}
   184  	fn = main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
   185  	fn()
   186  	if raceenabled {
   187  		racefini()
   188  	}
   189  
   190  	// Make racy client program work: if panicking on
   191  	// another goroutine at the same time as main returns,
   192  	// let the other goroutine finish printing the panic trace.
   193  	// Once it does, it will exit. See issue 3934.
   194  	if panicking != 0 {
   195  		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
   196  	}
   197  
   198  	exit(0)
   199  	for {
   200  		var x *int32
   201  		*x = 0
   202  	}
   203  }
   204  
   205  // os_beforeExit is called from os.Exit(0).
   206  //go:linkname os_beforeExit os.runtime_beforeExit
   207  func os_beforeExit() {
   208  	if raceenabled {
   209  		racefini()
   210  	}
   211  }
   212  
   213  // start forcegc helper goroutine
   214  func init() {
   215  	go forcegchelper()
   216  }
   217  
   218  func forcegchelper() {
   219  	forcegc.g = getg()
   220  	for {
   221  		lock(&forcegc.lock)
   222  		if forcegc.idle != 0 {
   223  			throw("forcegc: phase error")
   224  		}
   225  		atomic.Store(&forcegc.idle, 1)
   226  		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
   227  		// this goroutine is explicitly resumed by sysmon
   228  		if debug.gctrace > 0 {
   229  			println("GC forced")
   230  		}
   231  		gcStart(gcBackgroundMode, true)
   232  	}
   233  }
   234  
   235  //go:nosplit
   236  
   237  // Gosched yields the processor, allowing other goroutines to run. It does not
   238  // suspend the current goroutine, so execution resumes automatically.
   239  func Gosched() {
   240  	mcall(gosched_m)
   241  }
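
        // Illustrative use from user code (hypothetical names, not part of the
        // runtime): a busy-polling loop can yield its P between checks so that
        // other runnable goroutines are not starved.
        //
        //	for !workAvailable() { // workAvailable is a hypothetical predicate
        //		runtime.Gosched()
        //	}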
   242  
   243  // Puts the current goroutine into a waiting state and calls unlockf.
   244  // If unlockf returns false, the goroutine is resumed.
   245  // unlockf must not access this G's stack, as it may be moved between
   246  // the call to gopark and the call to unlockf.
   247  func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
   248  	mp := acquirem()
   249  	gp := mp.curg
   250  	status := readgstatus(gp)
   251  	if status != _Grunning && status != _Gscanrunning {
   252  		throw("gopark: bad g status")
   253  	}
   254  	mp.waitlock = lock
   255  	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
   256  	gp.waitreason = reason
   257  	mp.waittraceev = traceEv
   258  	mp.waittraceskip = traceskip
   259  	releasem(mp)
   260  	// can't do anything that might move the G between Ms here.
   261  	mcall(park_m)
   262  }
   263  
   264  // Puts the current goroutine into a waiting state and unlocks the lock.
   265  // The goroutine can be made runnable again by calling goready(gp).
   266  func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
   267  	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
   268  }
   269  
   270  func goready(gp *g, traceskip int) {
   271  	systemstack(func() {
   272  		ready(gp, traceskip, true)
   273  	})
   274  }
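
        // Illustrative gopark/goready pairing (a sketch using a hypothetical
        // waiter struct w; forcegchelper above shows a real in-runtime user):
        // the waiting goroutine records its g and parks under the primitive's
        // mutex, and another goroutine later makes it runnable again.
        //
        //	// waiter
        //	lock(&w.lock)
        //	w.g = getg()
        //	goparkunlock(&w.lock, "example wait", traceEvGoBlock, 1)
        //	// resumes here after the waker calls goready
        //
        //	// waker
        //	lock(&w.lock)
        //	gp := w.g
        //	w.g = nil
        //	unlock(&w.lock)
        //	goready(gp, 1)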
   275  
   276  //go:nosplit
   277  func acquireSudog() *sudog {
   278  	// Delicate dance: the semaphore implementation calls
   279  	// acquireSudog, acquireSudog calls new(sudog),
   280  	// new calls malloc, malloc can call the garbage collector,
   281  	// and the garbage collector calls the semaphore implementation
   282  	// in stopTheWorld.
   283  	// Break the cycle by doing acquirem/releasem around new(sudog).
   284  	// The acquirem/releasem increments m.locks during new(sudog),
   285  	// which keeps the garbage collector from being invoked.
   286  	mp := acquirem()
   287  	pp := mp.p.ptr()
   288  	if len(pp.sudogcache) == 0 {
   289  		lock(&sched.sudoglock)
   290  		// First, try to grab a batch from central cache.
   291  		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
   292  			s := sched.sudogcache
   293  			sched.sudogcache = s.next
   294  			s.next = nil
   295  			pp.sudogcache = append(pp.sudogcache, s)
   296  		}
   297  		unlock(&sched.sudoglock)
   298  		// If the central cache is empty, allocate a new one.
   299  		if len(pp.sudogcache) == 0 {
   300  			pp.sudogcache = append(pp.sudogcache, new(sudog))
   301  		}
   302  	}
   303  	n := len(pp.sudogcache)
   304  	s := pp.sudogcache[n-1]
   305  	pp.sudogcache[n-1] = nil
   306  	pp.sudogcache = pp.sudogcache[:n-1]
   307  	if s.elem != nil {
   308  		throw("acquireSudog: found s.elem != nil in cache")
   309  	}
   310  	releasem(mp)
   311  	return s
   312  }
   313  
   314  //go:nosplit
   315  func releaseSudog(s *sudog) {
   316  	if s.elem != nil {
   317  		throw("runtime: sudog with non-nil elem")
   318  	}
   319  	if s.selectdone != nil {
   320  		throw("runtime: sudog with non-nil selectdone")
   321  	}
   322  	if s.next != nil {
   323  		throw("runtime: sudog with non-nil next")
   324  	}
   325  	if s.prev != nil {
   326  		throw("runtime: sudog with non-nil prev")
   327  	}
   328  	if s.waitlink != nil {
   329  		throw("runtime: sudog with non-nil waitlink")
   330  	}
   331  	if s.c != nil {
   332  		throw("runtime: sudog with non-nil c")
   333  	}
   334  	gp := getg()
   335  	if gp.param != nil {
   336  		throw("runtime: releaseSudog with non-nil gp.param")
   337  	}
   338  	mp := acquirem() // avoid rescheduling to another P
   339  	pp := mp.p.ptr()
   340  	if len(pp.sudogcache) == cap(pp.sudogcache) {
   341  		// Transfer half of local cache to the central cache.
   342  		var first, last *sudog
   343  		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
   344  			n := len(pp.sudogcache)
   345  			p := pp.sudogcache[n-1]
   346  			pp.sudogcache[n-1] = nil
   347  			pp.sudogcache = pp.sudogcache[:n-1]
   348  			if first == nil {
   349  				first = p
   350  			} else {
   351  				last.next = p
   352  			}
   353  			last = p
   354  		}
   355  		lock(&sched.sudoglock)
   356  		last.next = sched.sudogcache
   357  		sched.sudogcache = first
   358  		unlock(&sched.sudoglock)
   359  	}
   360  	pp.sudogcache = append(pp.sudogcache, s)
   361  	releasem(mp)
   362  }
   363  
   364  // funcPC returns the entry PC of the function f.
   365  // It assumes that f is a func value. Otherwise the behavior is undefined.
   366  //go:nosplit
   367  func funcPC(f interface{}) uintptr {
   368  	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
   369  }
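
        // funcPC depends on the runtime representation of a func value: the
        // interface's data word points at a funcval whose first word is the
        // entry PC, hence the double dereference above. Illustrative uses from
        // this file:
        //
        //	gp.sched.pc = funcPC(goexit) + sys.PCQuantum // oneNewExtraM, below
        //	ts.fn = unsafe.Pointer(funcPC(mstart))       // newm, for _cgo_thread_start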
   370  
   371  // called from assembly
   372  func badmcall(fn func(*g)) {
   373  	throw("runtime: mcall called on m->g0 stack")
   374  }
   375  
   376  func badmcall2(fn func(*g)) {
   377  	throw("runtime: mcall function returned")
   378  }
   379  
   380  func badreflectcall() {
   381  	panic(plainError("arg size to reflect.call more than 1GB"))
   382  }
   383  
   384  var badmorestackg0Msg = "fatal: morestack on g0\n"
   385  
   386  //go:nosplit
   387  //go:nowritebarrierrec
   388  func badmorestackg0() {
   389  	sp := stringStructOf(&badmorestackg0Msg)
   390  	write(2, sp.str, int32(sp.len))
   391  }
   392  
   393  var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
   394  
   395  //go:nosplit
   396  //go:nowritebarrierrec
   397  func badmorestackgsignal() {
   398  	sp := stringStructOf(&badmorestackgsignalMsg)
   399  	write(2, sp.str, int32(sp.len))
   400  }
   401  
   402  //go:nosplit
   403  func badctxt() {
   404  	throw("ctxt != 0")
   405  }
   406  
   407  func lockedOSThread() bool {
   408  	gp := getg()
   409  	return gp.lockedm != nil && gp.m.lockedg != nil
   410  }
   411  
   412  var (
   413  	allgs    []*g
   414  	allglock mutex
   415  )
   416  
   417  func allgadd(gp *g) {
   418  	if readgstatus(gp) == _Gidle {
   419  		throw("allgadd: bad status Gidle")
   420  	}
   421  
   422  	lock(&allglock)
   423  	allgs = append(allgs, gp)
   424  	allglen = uintptr(len(allgs))
   425  
   426  	// Grow GC rescan list if necessary.
   427  	if len(allgs) > cap(work.rescan.list) {
   428  		lock(&work.rescan.lock)
   429  		l := work.rescan.list
   430  		// Let append do the heavy lifting, but keep the
   431  		// length the same.
   432  		work.rescan.list = append(l[:cap(l)], 0)[:len(l)]
   433  		unlock(&work.rescan.lock)
   434  	}
   435  	unlock(&allglock)
   436  }
   437  
   438  const (
   439  	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
   440  // 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
   441  	_GoidCacheBatch = 16
   442  )
   443  
   444  // The bootstrap sequence is:
   445  //
   446  //	call osinit
   447  //	call schedinit
   448  //	make & queue new G
   449  //	call runtime·mstart
   450  //
   451  // The new G calls runtime·main.
   452  func schedinit() {
   453  	// raceinit must be the first call to race detector.
   454  	// In particular, it must be done before mallocinit below calls racemapshadow.
   455  	_g_ := getg()
   456  	if raceenabled {
   457  		_g_.racectx, raceprocctx0 = raceinit()
   458  	}
   459  
   460  	sched.maxmcount = 10000
   461  
   462  	tracebackinit()
   463  	moduledataverify()
   464  	stackinit()
   465  	mallocinit()
   466  	mcommoninit(_g_.m)
   467  	alginit()       // maps must not be used before this call
   468  	typelinksinit() // uses maps
   469  	itabsinit()
   470  
   471  	msigsave(_g_.m)
   472  	initSigmask = _g_.m.sigmask
   473  
   474  	goargs()
   475  	goenvs()
   476  	parsedebugvars()
   477  	gcinit()
   478  
   479  	sched.lastpoll = uint64(nanotime())
   480  	procs := int(ncpu)
   481  	if procs > _MaxGomaxprocs {
   482  		procs = _MaxGomaxprocs
   483  	}
   484  	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
   485  		if n > _MaxGomaxprocs {
   486  			n = _MaxGomaxprocs
   487  		}
   488  		procs = n
   489  	}
   490  	if procresize(int32(procs)) != nil {
   491  		throw("unknown runnable goroutine during bootstrap")
   492  	}
   493  
   494  	if buildVersion == "" {
   495  		// Condition should never trigger. This code just serves
   496  		// to ensure runtime·buildVersion is kept in the resulting binary.
   497  		buildVersion = "unknown"
   498  	}
   499  }
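
        // Illustrative outcomes of the GOMAXPROCS logic above (hypothetical figures):
        //
        //	GOMAXPROCS unset, ncpu == 8               -> procresize(8)
        //	GOMAXPROCS=2                              -> procresize(2)
        //	GOMAXPROCS=100000 (above _MaxGomaxprocs)  -> procresize(_MaxGomaxprocs)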
   500  
   501  func dumpgstatus(gp *g) {
   502  	_g_ := getg()
   503  	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   504  	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
   505  }
   506  
   507  func checkmcount() {
   508  	// sched lock is held
   509  	if sched.mcount > sched.maxmcount {
   510  		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
   511  		throw("thread exhaustion")
   512  	}
   513  }
   514  
   515  func mcommoninit(mp *m) {
   516  	_g_ := getg()
   517  
   518  // g0 stack won't make sense for user (and is not necessarily unwindable).
   519  	if _g_ != _g_.m.g0 {
   520  		callers(1, mp.createstack[:])
   521  	}
   522  
   523  	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
   524  	if mp.fastrand == 0 {
   525  		mp.fastrand = 0x49f6428a
   526  	}
   527  
   528  	lock(&sched.lock)
   529  	mp.id = sched.mcount
   530  	sched.mcount++
   531  	checkmcount()
   532  	mpreinit(mp)
   533  	if mp.gsignal != nil {
   534  		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
   535  	}
   536  
   537  	// Add to allm so garbage collector doesn't free g->m
   538  	// when it is just in a register or thread-local storage.
   539  	mp.alllink = allm
   540  
   541  	// NumCgoCall() iterates over allm w/o schedlock,
   542  	// so we need to publish it safely.
   543  	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
   544  	unlock(&sched.lock)
   545  
   546  	// Allocate memory to hold a cgo traceback if the cgo call crashes.
   547  	if iscgo || GOOS == "solaris" || GOOS == "windows" {
   548  		mp.cgoCallers = new(cgoCallers)
   549  	}
   550  }
   551  
   552  // Mark gp ready to run.
   553  func ready(gp *g, traceskip int, next bool) {
   554  	if trace.enabled {
   555  		traceGoUnpark(gp, traceskip)
   556  	}
   557  
   558  	status := readgstatus(gp)
   559  
   560  	// Mark runnable.
   561  	_g_ := getg()
   562  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
   563  	if status&^_Gscan != _Gwaiting {
   564  		dumpgstatus(gp)
   565  		throw("bad g->status in ready")
   566  	}
   567  
   568  	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   569  	casgstatus(gp, _Gwaiting, _Grunnable)
   570  	runqput(_g_.m.p.ptr(), gp, next)
   571  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
   572  		wakep()
   573  	}
   574  	_g_.m.locks--
   575  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   576  		_g_.stackguard0 = stackPreempt
   577  	}
   578  }
   579  
   580  func gcprocs() int32 {
   581  	// Figure out how many CPUs to use during GC.
   582  	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
   583  	lock(&sched.lock)
   584  	n := gomaxprocs
   585  	if n > ncpu {
   586  		n = ncpu
   587  	}
   588  	if n > _MaxGcproc {
   589  		n = _MaxGcproc
   590  	}
   591  	if n > sched.nmidle+1 { // one M is currently running
   592  		n = sched.nmidle + 1
   593  	}
   594  	unlock(&sched.lock)
   595  	return n
   596  }
   597  
   598  func needaddgcproc() bool {
   599  	lock(&sched.lock)
   600  	n := gomaxprocs
   601  	if n > ncpu {
   602  		n = ncpu
   603  	}
   604  	if n > _MaxGcproc {
   605  		n = _MaxGcproc
   606  	}
   607  	n -= sched.nmidle + 1 // one M is currently running
   608  	unlock(&sched.lock)
   609  	return n > 0
   610  }
   611  
   612  func helpgc(nproc int32) {
   613  	_g_ := getg()
   614  	lock(&sched.lock)
   615  	pos := 0
   616  	for n := int32(1); n < nproc; n++ { // one M is currently running
   617  		if allp[pos].mcache == _g_.m.mcache {
   618  			pos++
   619  		}
   620  		mp := mget()
   621  		if mp == nil {
   622  			throw("gcprocs inconsistency")
   623  		}
   624  		mp.helpgc = n
   625  		mp.p.set(allp[pos])
   626  		mp.mcache = allp[pos].mcache
   627  		pos++
   628  		notewakeup(&mp.park)
   629  	}
   630  	unlock(&sched.lock)
   631  }
   632  
   633  // freezeStopWait is a large value that freezetheworld sets
   634  // sched.stopwait to in order to request that all Gs permanently stop.
   635  const freezeStopWait = 0x7fffffff
   636  
   637  // Similar to stopTheWorld but best-effort and can be called several times.
   638  // There is no reverse operation; it is used during crashing.
   639  // This function must not lock any mutexes.
   640  func freezetheworld() {
   641  	// stopwait and preemption requests can be lost
   642  	// due to races with concurrently executing threads,
   643  	// so try several times
   644  	for i := 0; i < 5; i++ {
   645  		// this should tell the scheduler to not start any new goroutines
   646  		sched.stopwait = freezeStopWait
   647  		atomic.Store(&sched.gcwaiting, 1)
   648  		// this should stop running goroutines
   649  		if !preemptall() {
   650  			break // no running goroutines
   651  		}
   652  		usleep(1000)
   653  	}
   654  	// to be sure
   655  	usleep(1000)
   656  	preemptall()
   657  	usleep(1000)
   658  }
   659  
   660  func isscanstatus(status uint32) bool {
   661  	if status == _Gscan {
   662  		throw("isscanstatus: Bad status Gscan")
   663  	}
   664  	return status&_Gscan == _Gscan
   665  }
   666  
   667  // All reads and writes of g's status go through readgstatus, casgstatus,
   668  // castogscanstatus, and casfrom_Gscanstatus.
   669  //go:nosplit
   670  func readgstatus(gp *g) uint32 {
   671  	return atomic.Load(&gp.atomicstatus)
   672  }
   673  
   674  // Ownership of gcscanvalid:
   675  //
   676  // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
   677  // then gp owns gp.gcscanvalid, and other goroutines must not modify it.
   678  //
   679  // Otherwise, a second goroutine can lock the scan state by setting _Gscan
   680  // in the status bit, modify gcscanvalid, and then unlock the scan state.
   681  //
   682  // Note that the first condition implies an exception to the second:
   683  // if a second goroutine changes gp's status to _Grunning|_Gscan,
   684  // that second goroutine still does not have the right to modify gcscanvalid.
   685  
   686  // The Gscanstatuses are acting like locks and this releases them.
   687  // If it proves to be a performance hit we should be able to make these
   688  // simple atomic stores but for now we are going to throw if
   689  // we see an inconsistent state.
   690  func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
   691  	success := false
   692  
   693  	// Check that transition is valid.
   694  	switch oldval {
   695  	default:
   696  		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   697  		dumpgstatus(gp)
   698  		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
   699  	case _Gscanrunnable,
   700  		_Gscanwaiting,
   701  		_Gscanrunning,
   702  		_Gscansyscall:
   703  		if newval == oldval&^_Gscan {
   704  			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
   705  		}
   706  	}
   707  	if !success {
   708  		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   709  		dumpgstatus(gp)
   710  		throw("casfrom_Gscanstatus: gp->status is not in scan state")
   711  	}
   712  }
   713  
   714  // This will return false if the gp is not in the expected status and the cas fails.
   715  // This acts like a lock acquire while casfrom_Gscanstatus acts like a lock release.
   716  func castogscanstatus(gp *g, oldval, newval uint32) bool {
   717  	switch oldval {
   718  	case _Grunnable,
   719  		_Grunning,
   720  		_Gwaiting,
   721  		_Gsyscall:
   722  		if newval == oldval|_Gscan {
   723  			return atomic.Cas(&gp.atomicstatus, oldval, newval)
   724  		}
   725  	}
   726  	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
   727  	throw("castogscanstatus")
   728  	panic("not reached")
   729  }
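
        // Illustrative acquire/release pairing (a sketch; scang below performs
        // the real sequence): setting the _Gscan bit pins gp's status so its
        // stack can be scanned, and casfrom_Gscanstatus releases it afterwards.
        //
        //	if castogscanstatus(gp, _Gwaiting, _Gwaiting|_Gscan) {
        //		// gp cannot start running while the scan bit is set
        //		scanstack(gp, gcw)
        //		casfrom_Gscanstatus(gp, _Gwaiting|_Gscan, _Gwaiting)
        //	}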
   730  
   731  // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
   732  // and casfrom_Gscanstatus instead.
   733  // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
   734  // put it in the Gscan state is finished.
   735  //go:nosplit
   736  func casgstatus(gp *g, oldval, newval uint32) {
   737  	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
   738  		systemstack(func() {
   739  			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
   740  			throw("casgstatus: bad incoming values")
   741  		})
   742  	}
   743  
   744  	if oldval == _Grunning && gp.gcscanvalid {
   745  	// If oldval == _Grunning, then the actual status must be
   746  		// _Grunning or _Grunning|_Gscan; either way,
   747  		// we own gp.gcscanvalid, so it's safe to read.
   748  		// gp.gcscanvalid must not be true when we are running.
   749  		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
   750  		throw("casgstatus")
   751  	}
   752  
   753  	// See http://golang.org/cl/21503 for justification of the yield delay.
   754  	const yieldDelay = 5 * 1000
   755  	var nextYield int64
   756  
   757  	// loop if gp->atomicstatus is in a scan state giving
   758  	// GC time to finish and change the state to oldval.
   759  	for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
   760  		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
   761  			systemstack(func() {
   762  				throw("casgstatus: waiting for Gwaiting but is Grunnable")
   763  			})
   764  		}
   765  		// Help GC if needed.
   766  		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
   767  		// 	gp.preemptscan = false
   768  		// 	systemstack(func() {
   769  		// 		gcphasework(gp)
   770  		// 	})
   771  		// }
   772  		// But meanwhile just yield.
   773  		if i == 0 {
   774  			nextYield = nanotime() + yieldDelay
   775  		}
   776  		if nanotime() < nextYield {
   777  			for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
   778  				procyield(1)
   779  			}
   780  		} else {
   781  			osyield()
   782  			nextYield = nanotime() + yieldDelay/2
   783  		}
   784  	}
   785  	if newval == _Grunning && gp.gcscanvalid {
   786  		// Run queueRescan on the system stack so it has more space.
   787  		systemstack(func() { queueRescan(gp) })
   788  	}
   789  }
   790  
   791  // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
   792  // Returns old status. Cannot call casgstatus directly, because we are racing with an
   793  // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
   794  // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
   795  // it would loop waiting for the status to go back to Gwaiting, which it never will.
   796  //go:nosplit
   797  func casgcopystack(gp *g) uint32 {
   798  	for {
   799  		oldstatus := readgstatus(gp) &^ _Gscan
   800  		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
   801  			throw("copystack: bad status, not Gwaiting or Grunnable")
   802  		}
   803  		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
   804  			return oldstatus
   805  		}
   806  	}
   807  }
   808  
   809  // scang blocks until gp's stack has been scanned.
   810  // It might be scanned by scang or it might be scanned by the goroutine itself.
   811  // Either way, the stack scan has completed when scang returns.
   812  func scang(gp *g, gcw *gcWork) {
   813  	// Invariant: we (the caller, markroot for a specific goroutine) own gp.gcscandone.
   814  	// Nothing is racing with us now, but gcscandone might be set to true left over
   815  	// from an earlier round of stack scanning (we scan twice per GC).
   816  	// We use gcscandone to record whether the scan has been done during this round.
   817  	// It is important that the scan happens exactly once: if called twice,
   818  	// the installation of stack barriers will detect the double scan and die.
   819  
   820  	gp.gcscandone = false
   821  
   822  	// See http://golang.org/cl/21503 for justification of the yield delay.
   823  	const yieldDelay = 10 * 1000
   824  	var nextYield int64
   825  
   826  	// Endeavor to get gcscandone set to true,
   827  	// either by doing the stack scan ourselves or by coercing gp to scan itself.
   828  	// gp.gcscandone can transition from false to true when we're not looking
   829  	// (if we asked for preemption), so any time we lock the status using
   830  	// castogscanstatus we have to double-check that the scan is still not done.
   831  loop:
   832  	for i := 0; !gp.gcscandone; i++ {
   833  		switch s := readgstatus(gp); s {
   834  		default:
   835  			dumpgstatus(gp)
   836  			throw("stopg: invalid status")
   837  
   838  		case _Gdead:
   839  			// No stack.
   840  			gp.gcscandone = true
   841  			break loop
   842  
   843  		case _Gcopystack:
   844  		// Stack being switched. Go around again.
   845  
   846  		case _Grunnable, _Gsyscall, _Gwaiting:
   847  			// Claim goroutine by setting scan bit.
   848  			// Racing with execution or readying of gp.
   849  			// The scan bit keeps them from running
   850  			// the goroutine until we're done.
   851  			if castogscanstatus(gp, s, s|_Gscan) {
   852  				if !gp.gcscandone {
   853  					scanstack(gp, gcw)
   854  					gp.gcscandone = true
   855  				}
   856  				restartg(gp)
   857  				break loop
   858  			}
   859  
   860  		case _Gscanwaiting:
   861  		// newstack is doing a scan for us right now. Wait.
   862  
   863  		case _Grunning:
   864  			// Goroutine running. Try to preempt execution so it can scan itself.
   865  			// The preemption handler (in newstack) does the actual scan.
   866  
   867  			// Optimization: if there is already a pending preemption request
   868  			// (from the previous loop iteration), don't bother with the atomics.
   869  			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
   870  				break
   871  			}
   872  
   873  			// Ask for preemption and self scan.
   874  			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
   875  				if !gp.gcscandone {
   876  					gp.preemptscan = true
   877  					gp.preempt = true
   878  					gp.stackguard0 = stackPreempt
   879  				}
   880  				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
   881  			}
   882  		}
   883  
   884  		if i == 0 {
   885  			nextYield = nanotime() + yieldDelay
   886  		}
   887  		if nanotime() < nextYield {
   888  			procyield(10)
   889  		} else {
   890  			osyield()
   891  			nextYield = nanotime() + yieldDelay/2
   892  		}
   893  	}
   894  
   895  	gp.preemptscan = false // cancel scan request if no longer needed
   896  }
   897  
   898  // The GC requests that this routine be moved from a scanmumble state to a mumble state.
   899  func restartg(gp *g) {
   900  	s := readgstatus(gp)
   901  	switch s {
   902  	default:
   903  		dumpgstatus(gp)
   904  		throw("restartg: unexpected status")
   905  
   906  	case _Gdead:
   907  	// ok
   908  
   909  	case _Gscanrunnable,
   910  		_Gscanwaiting,
   911  		_Gscansyscall:
   912  		casfrom_Gscanstatus(gp, s, s&^_Gscan)
   913  	}
   914  }
   915  
   916  // stopTheWorld stops all P's from executing goroutines, interrupting
   917  // all goroutines at GC safe points and recording reason as the reason
   918  // for the stop. On return, only the current goroutine's P is running.
   919  // stopTheWorld must not be called from a system stack and the caller
   920  // must not hold worldsema. The caller must call startTheWorld when
   921  // other P's should resume execution.
   922  //
   923  // stopTheWorld is safe for multiple goroutines to call at the
   924  // same time. Each will execute its own stop, and the stops will
   925  // be serialized.
   926  //
   927  // This is also used by routines that do stack dumps. If the system is
   928  // in panic or being exited, this may not reliably stop all
   929  // goroutines.
   930  func stopTheWorld(reason string) {
   931  	semacquire(&worldsema, 0)
   932  	getg().m.preemptoff = reason
   933  	systemstack(stopTheWorldWithSema)
   934  }
   935  
   936  // startTheWorld undoes the effects of stopTheWorld.
   937  func startTheWorld() {
   938  	systemstack(startTheWorldWithSema)
   939  	// worldsema must be held over startTheWorldWithSema to ensure
   940  	// gomaxprocs cannot change while worldsema is held.
   941  	semrelease(&worldsema)
   942  	getg().m.preemptoff = ""
   943  }
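
        // Illustrative pairing (a sketch; the garbage collector and routines
        // such as ReadMemStats use this pattern): stop every other P, operate
        // on global state, then let execution resume.
        //
        //	stopTheWorld("example: inspect global state")
        //	// only this goroutine's P is running here
        //	startTheWorld()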
   944  
   945  // Holding worldsema grants an M the right to try to stop the world
   946  // and prevents gomaxprocs from changing concurrently.
   947  var worldsema uint32 = 1
   948  
   949  // stopTheWorldWithSema is the core implementation of stopTheWorld.
   950  // The caller is responsible for acquiring worldsema and disabling
   951  // preemption first and then should call stopTheWorldWithSema on the system
   952  // stack:
   953  //
   954  //	semacquire(&worldsema, 0)
   955  //	m.preemptoff = "reason"
   956  //	systemstack(stopTheWorldWithSema)
   957  //
   958  // When finished, the caller must either call startTheWorld or undo
   959  // these three operations separately:
   960  //
   961  //	m.preemptoff = ""
   962  //	systemstack(startTheWorldWithSema)
   963  //	semrelease(&worldsema)
   964  //
   965  // It is allowed to acquire worldsema once and then execute multiple
   966  // startTheWorldWithSema/stopTheWorldWithSema pairs.
   967  // Other P's are able to execute between successive calls to
   968  // startTheWorldWithSema and stopTheWorldWithSema.
   969  // Holding worldsema causes any other goroutines invoking
   970  // stopTheWorld to block.
   971  func stopTheWorldWithSema() {
   972  	_g_ := getg()
   973  
   974  	// If we hold a lock, then we won't be able to stop another M
   975  	// that is blocked trying to acquire the lock.
   976  	if _g_.m.locks > 0 {
   977  		throw("stopTheWorld: holding locks")
   978  	}
   979  
   980  	lock(&sched.lock)
   981  	sched.stopwait = gomaxprocs
   982  	atomic.Store(&sched.gcwaiting, 1)
   983  	preemptall()
   984  	// stop current P
   985  	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
   986  	sched.stopwait--
   987  	// try to retake all P's in Psyscall status
   988  	for i := 0; i < int(gomaxprocs); i++ {
   989  		p := allp[i]
   990  		s := p.status
   991  		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
   992  			if trace.enabled {
   993  				traceGoSysBlock(p)
   994  				traceProcStop(p)
   995  			}
   996  			p.syscalltick++
   997  			sched.stopwait--
   998  		}
   999  	}
  1000  	// stop idle P's
  1001  	for {
  1002  		p := pidleget()
  1003  		if p == nil {
  1004  			break
  1005  		}
  1006  		p.status = _Pgcstop
  1007  		sched.stopwait--
  1008  	}
  1009  	wait := sched.stopwait > 0
  1010  	unlock(&sched.lock)
  1011  
  1012  	// wait for remaining P's to stop voluntarily
  1013  	if wait {
  1014  		for {
  1015  			// wait for 100us, then try to re-preempt in case of any races
  1016  			if notetsleep(&sched.stopnote, 100*1000) {
  1017  				noteclear(&sched.stopnote)
  1018  				break
  1019  			}
  1020  			preemptall()
  1021  		}
  1022  	}
  1023  	if sched.stopwait != 0 {
  1024  		throw("stopTheWorld: not stopped")
  1025  	}
  1026  	for i := 0; i < int(gomaxprocs); i++ {
  1027  		p := allp[i]
  1028  		if p.status != _Pgcstop {
  1029  			throw("stopTheWorld: not stopped")
  1030  		}
  1031  	}
  1032  }
  1033  
  1034  func mhelpgc() {
  1035  	_g_ := getg()
  1036  	_g_.m.helpgc = -1
  1037  }
  1038  
  1039  func startTheWorldWithSema() {
  1040  	_g_ := getg()
  1041  
  1042  	_g_.m.locks++        // disable preemption because it can be holding p in a local var
  1043  	gp := netpoll(false) // non-blocking
  1044  	injectglist(gp)
  1045  	add := needaddgcproc()
  1046  	lock(&sched.lock)
  1047  
  1048  	procs := gomaxprocs
  1049  	if newprocs != 0 {
  1050  		procs = newprocs
  1051  		newprocs = 0
  1052  	}
  1053  	p1 := procresize(procs)
  1054  	sched.gcwaiting = 0
  1055  	if sched.sysmonwait != 0 {
  1056  		sched.sysmonwait = 0
  1057  		notewakeup(&sched.sysmonnote)
  1058  	}
  1059  	unlock(&sched.lock)
  1060  
  1061  	for p1 != nil {
  1062  		p := p1
  1063  		p1 = p1.link.ptr()
  1064  		if p.m != 0 {
  1065  			mp := p.m.ptr()
  1066  			p.m = 0
  1067  			if mp.nextp != 0 {
  1068  				throw("startTheWorld: inconsistent mp->nextp")
  1069  			}
  1070  			mp.nextp.set(p)
  1071  			notewakeup(&mp.park)
  1072  		} else {
  1073  			// Start M to run P.  Do not start another M below.
  1074  			newm(nil, p)
  1075  			add = false
  1076  		}
  1077  	}
  1078  
  1079  	// Wake up an additional proc in case we have excessive runnable goroutines
  1080  	// in local queues or in the global queue. If we don't, the proc will park itself.
  1081  	// If we have a lot of excess work, resetspinning will unpark additional procs as necessary.
  1082  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
  1083  		wakep()
  1084  	}
  1085  
  1086  	if add {
  1087  		// If GC could have used another helper proc, start one now,
  1088  		// in the hope that it will be available next time.
  1089  		// It would have been even better to start it before the collection,
  1090  		// but doing so requires allocating memory, so it's tricky to
  1091  		// coordinate. This lazy approach works out in practice:
  1092  		// we don't mind if the first couple gc rounds don't have quite
  1093  		// the maximum number of procs.
  1094  		newm(mhelpgc, nil)
  1095  	}
  1096  	_g_.m.locks--
  1097  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  1098  		_g_.stackguard0 = stackPreempt
  1099  	}
  1100  }
  1101  
  1102  // Called to start an M.
  1103  //go:nosplit
  1104  func mstart() {
  1105  	_g_ := getg()
  1106  
  1107  	if _g_.stack.lo == 0 {
  1108  		// Initialize stack bounds from system stack.
  1109  		// Cgo may have left stack size in stack.hi.
  1110  		size := _g_.stack.hi
  1111  		if size == 0 {
  1112  			size = 8192 * sys.StackGuardMultiplier
  1113  		}
  1114  		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
  1115  		_g_.stack.lo = _g_.stack.hi - size + 1024
  1116  	}
  1117  	// Initialize stack guards so that we can start calling
  1118  	// both Go and C functions with stack growth prologues.
  1119  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1120  	_g_.stackguard1 = _g_.stackguard0
  1121  	mstart1()
  1122  }
  1123  
  1124  func mstart1() {
  1125  	_g_ := getg()
  1126  
  1127  	if _g_ != _g_.m.g0 {
  1128  		throw("bad runtime·mstart")
  1129  	}
  1130  
  1131  	// Record top of stack for use by mcall.
  1132  	// Once we call schedule we're never coming back,
  1133  	// so other calls can reuse this stack space.
  1134  	gosave(&_g_.m.g0.sched)
  1135  	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
  1136  	asminit()
  1137  	minit()
  1138  
  1139  	// Install signal handlers; after minit so that minit can
  1140  	// prepare the thread to be able to handle the signals.
  1141  	if _g_.m == &m0 {
  1142  		// Create an extra M for callbacks on threads not created by Go.
  1143  		if iscgo && !cgoHasExtraM {
  1144  			cgoHasExtraM = true
  1145  			newextram()
  1146  		}
  1147  		initsig(false)
  1148  	}
  1149  
  1150  	if fn := _g_.m.mstartfn; fn != nil {
  1151  		fn()
  1152  	}
  1153  
  1154  	if _g_.m.helpgc != 0 {
  1155  		_g_.m.helpgc = 0
  1156  		stopm()
  1157  	} else if _g_.m != &m0 {
  1158  		acquirep(_g_.m.nextp.ptr())
  1159  		_g_.m.nextp = 0
  1160  	}
  1161  	schedule()
  1162  }
  1163  
  1164  // forEachP calls fn(p) for every P p when p reaches a GC safe point.
  1165  // If a P is currently executing code, this will bring the P to a GC
  1166  // safe point and execute fn on that P. If the P is not executing code
  1167  // (it is idle or in a syscall), this will call fn(p) directly while
  1168  // preventing the P from exiting its state. This does not ensure that
  1169  // fn will run on every CPU executing Go code, but it acts as a global
  1170  // memory barrier. GC uses this as a "ragged barrier."
  1171  //
  1172  // The caller must hold worldsema.
  1173  //
  1174  //go:systemstack
  1175  func forEachP(fn func(*p)) {
  1176  	mp := acquirem()
  1177  	_p_ := getg().m.p.ptr()
  1178  
  1179  	lock(&sched.lock)
  1180  	if sched.safePointWait != 0 {
  1181  		throw("forEachP: sched.safePointWait != 0")
  1182  	}
  1183  	sched.safePointWait = gomaxprocs - 1
  1184  	sched.safePointFn = fn
  1185  
  1186  	// Ask all Ps to run the safe point function.
  1187  	for _, p := range allp[:gomaxprocs] {
  1188  		if p != _p_ {
  1189  			atomic.Store(&p.runSafePointFn, 1)
  1190  		}
  1191  	}
  1192  	preemptall()
  1193  
  1194  	// Any P entering _Pidle or _Psyscall from now on will observe
  1195  	// p.runSafePointFn == 1 and will call runSafePointFn when
  1196  	// changing its status to _Pidle/_Psyscall.
  1197  
  1198  	// Run safe point function for all idle Ps. sched.pidle will
  1199  	// not change because we hold sched.lock.
  1200  	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
  1201  		if atomic.Cas(&p.runSafePointFn, 1, 0) {
  1202  			fn(p)
  1203  			sched.safePointWait--
  1204  		}
  1205  	}
  1206  
  1207  	wait := sched.safePointWait > 0
  1208  	unlock(&sched.lock)
  1209  
  1210  	// Run fn for the current P.
  1211  	fn(_p_)
  1212  
  1213  	// Force Ps currently in _Psyscall into _Pidle and hand them
  1214  	// off to induce safe point function execution.
  1215  	for i := 0; i < int(gomaxprocs); i++ {
  1216  		p := allp[i]
  1217  		s := p.status
  1218  		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
  1219  			if trace.enabled {
  1220  				traceGoSysBlock(p)
  1221  				traceProcStop(p)
  1222  			}
  1223  			p.syscalltick++
  1224  			handoffp(p)
  1225  		}
  1226  	}
  1227  
  1228  	// Wait for remaining Ps to run fn.
  1229  	if wait {
  1230  		for {
  1231  			// Wait for 100us, then try to re-preempt in
  1232  			// case of any races.
  1233  			//
  1234  			// Requires system stack.
  1235  			if notetsleep(&sched.safePointNote, 100*1000) {
  1236  				noteclear(&sched.safePointNote)
  1237  				break
  1238  			}
  1239  			preemptall()
  1240  		}
  1241  	}
  1242  	if sched.safePointWait != 0 {
  1243  		throw("forEachP: not done")
  1244  	}
  1245  	for i := 0; i < int(gomaxprocs); i++ {
  1246  		p := allp[i]
  1247  		if p.runSafePointFn != 0 {
  1248  			throw("forEachP: P did not run fn")
  1249  		}
  1250  	}
  1251  
  1252  	lock(&sched.lock)
  1253  	sched.safePointFn = nil
  1254  	unlock(&sched.lock)
  1255  	releasem(mp)
  1256  }
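
        // Illustrative call (a sketch; the garbage collector is the real user):
        // the caller holds worldsema and invokes forEachP on the system stack,
        // and fn runs once per P at a safe point.
        //
        //	semacquire(&worldsema, 0)
        //	systemstack(func() {
        //		forEachP(func(_p_ *p) {
        //			// flush or inspect per-P state here
        //		})
        //	})
        //	semrelease(&worldsema)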
  1257  
  1258  // runSafePointFn runs the safe point function, if any, for this P.
  1259  // This should be called like
  1260  //
  1261  //     if getg().m.p.runSafePointFn != 0 {
  1262  //         runSafePointFn()
  1263  //     }
  1264  //
  1265  // runSafePointFn must be checked on any transition in to _Pidle or
  1266  // _Psyscall to avoid a race where forEachP sees that the P is running
  1267  // just before the P goes into _Pidle/_Psyscall and neither forEachP
  1268  // nor the P run the safe-point function.
  1269  func runSafePointFn() {
  1270  	p := getg().m.p.ptr()
  1271  	// Resolve the race between forEachP running the safe-point
  1272  	// function on this P's behalf and this P running the
  1273  	// safe-point function directly.
  1274  	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
  1275  		return
  1276  	}
  1277  	sched.safePointFn(p)
  1278  	lock(&sched.lock)
  1279  	sched.safePointWait--
  1280  	if sched.safePointWait == 0 {
  1281  		notewakeup(&sched.safePointNote)
  1282  	}
  1283  	unlock(&sched.lock)
  1284  }
  1285  
  1286  // When running with cgo, we call _cgo_thread_start
  1287  // to start threads for us so that we can play nicely with
  1288  // foreign code.
  1289  var cgoThreadStart unsafe.Pointer
  1290  
  1291  type cgothreadstart struct {
  1292  	g   guintptr
  1293  	tls *uint64
  1294  	fn  unsafe.Pointer
  1295  }
  1296  
  1297  // Allocate a new m unassociated with any thread.
  1298  // Can use p for allocation context if needed.
  1299  // fn is recorded as the new m's m.mstartfn.
  1300  //
  1301  // This function is allowed to have write barriers even if the caller
  1302  // isn't because it borrows _p_.
  1303  //
  1304  //go:yeswritebarrierrec
  1305  func allocm(_p_ *p, fn func()) *m {
  1306  	_g_ := getg()
  1307  	_g_.m.locks++ // disable GC because it can be called from sysmon
  1308  	if _g_.m.p == 0 {
  1309  		acquirep(_p_) // temporarily borrow p for mallocs in this function
  1310  	}
  1311  	mp := new(m)
  1312  	mp.mstartfn = fn
  1313  	mcommoninit(mp)
  1314  
  1315  	// In case of cgo or Solaris, pthread_create will make us a stack.
  1316  // Windows and Plan 9 will lay out the sched stack on the OS stack.
  1317  	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
  1318  		mp.g0 = malg(-1)
  1319  	} else {
  1320  		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
  1321  	}
  1322  	mp.g0.m = mp
  1323  
  1324  	if _p_ == _g_.m.p.ptr() {
  1325  		releasep()
  1326  	}
  1327  	_g_.m.locks--
  1328  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  1329  		_g_.stackguard0 = stackPreempt
  1330  	}
  1331  
  1332  	return mp
  1333  }
  1334  
  1335  // needm is called when a cgo callback happens on a
  1336  // thread without an m (a thread not created by Go).
  1337  // In this case, needm is expected to find an m to use
  1338  // and return with m, g initialized correctly.
  1339  // Since m and g are not set now (likely nil, but see below)
  1340  // needm is limited in what routines it can call. In particular
  1341  // it can only call nosplit functions (textflag 7) and cannot
  1342  // do any scheduling that requires an m.
  1343  //
  1344  // In order to avoid needing heavy lifting here, we adopt
  1345  // the following strategy: there is a stack of available m's
  1346  // that can be stolen. Using compare-and-swap
  1347  // to pop from the stack has ABA races, so we simulate
  1348  // a lock by doing an exchange (via casp) to steal the stack
  1349  // head and replace the top pointer with MLOCKED (1).
  1350  // This serves as a simple spin lock that we can use even
  1351  // without an m. The thread that locks the stack in this way
  1352  // unlocks the stack by storing a valid stack head pointer.
  1353  //
  1354  // In order to make sure that there is always an m structure
  1355  // available to be stolen, we maintain the invariant that there
  1356  // is always one more than needed. At the beginning of the
  1357  // program (if cgo is in use) the list is seeded with a single m.
  1358  // If needm finds that it has taken the last m off the list, its job
  1359  // is - once it has installed its own m so that it can do things like
  1360  // allocate memory - to create a spare m and put it on the list.
  1361  //
  1362  // Each of these extra m's also has a g0 and a curg that are
  1363  // pressed into service as the scheduling stack and current
  1364  // goroutine for the duration of the cgo callback.
  1365  //
  1366  // When the callback is done with the m, it calls dropm to
  1367  // put the m back on the list.
  1368  //go:nosplit
  1369  func needm(x byte) {
  1370  	if iscgo && !cgoHasExtraM {
  1371  		// Can happen if C/C++ code calls Go from a global ctor.
  1372  		// Can not throw, because scheduler is not initialized yet.
  1373  		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
  1374  		exit(1)
  1375  	}
  1376  
  1377  	// Lock extra list, take head, unlock popped list.
  1378  	// nilokay=false is safe here because of the invariant above,
  1379  	// that the extra list always contains or will soon contain
  1380  	// at least one m.
  1381  	mp := lockextra(false)
  1382  
  1383  	// Set needextram when we've just emptied the list,
  1384  	// so that the eventual call into cgocallbackg will
  1385  	// allocate a new m for the extra list. We delay the
  1386  	// allocation until then so that it can be done
  1387  	// after exitsyscall makes sure it is okay to be
  1388  	// running at all (that is, there's no garbage collection
  1389  	// running right now).
  1390  	mp.needextram = mp.schedlink == 0
  1391  	unlockextra(mp.schedlink.ptr())
  1392  
  1393  	// Save and block signals before installing g.
  1394  	// Once g is installed, any incoming signals will try to execute,
  1395  	// but we won't have the sigaltstack settings and other data
  1396  	// set up appropriately until the end of minit, which will
  1397  	// unblock the signals. This is the same dance as when
  1398  	// starting a new m to run Go code via newosproc.
  1399  	msigsave(mp)
  1400  	sigblock()
  1401  
  1402  	// Install g (= m->g0) and set the stack bounds
  1403  	// to match the current stack. We don't actually know
  1404  	// how big the stack is, like we don't know how big any
  1405  	// scheduling stack is, but we assume there's at least 32 kB,
  1406  	// which is more than enough for us.
  1407  	setg(mp.g0)
  1408  	_g_ := getg()
  1409  	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
  1410  	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
  1411  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1412  
  1413  	// Initialize this thread to use the m.
  1414  	asminit()
  1415  	minit()
  1416  }
  1417  
  1418  var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
  1419  
  1420  // newextram allocates m's and puts them on the extra list.
  1421  // It is called with a working local m, so that it can do things
  1422  // like call schedlock and allocate.
  1423  func newextram() {
  1424  	c := atomic.Xchg(&extraMWaiters, 0)
  1425  	if c > 0 {
  1426  		for i := uint32(0); i < c; i++ {
  1427  			oneNewExtraM()
  1428  		}
  1429  	} else {
  1430  		// Make sure there is at least one extra M.
  1431  		mp := lockextra(true)
  1432  		unlockextra(mp)
  1433  		if mp == nil {
  1434  			oneNewExtraM()
  1435  		}
  1436  	}
  1437  }
  1438  
  1439  // oneNewExtraM allocates an m and puts it on the extra list.
  1440  func oneNewExtraM() {
  1441  	// Create extra goroutine locked to extra m.
  1442  	// The goroutine is the context in which the cgo callback will run.
  1443  	// The sched.pc will never be returned to, but setting it to
  1444  	// goexit makes clear to the traceback routines where
  1445  	// the goroutine stack ends.
  1446  	mp := allocm(nil, nil)
  1447  	gp := malg(4096)
  1448  	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
  1449  	gp.sched.sp = gp.stack.hi
  1450  	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
  1451  	gp.sched.lr = 0
  1452  	gp.sched.g = guintptr(unsafe.Pointer(gp))
  1453  	gp.syscallpc = gp.sched.pc
  1454  	gp.syscallsp = gp.sched.sp
  1455  	gp.stktopsp = gp.sched.sp
  1456  	gp.gcscanvalid = true // fresh G, so no dequeueRescan necessary
  1457  	gp.gcscandone = true
  1458  	gp.gcRescan = -1
  1459  	// malg returns status as Gidle, change to Gsyscall before adding to allg
  1460  	// where GC will see it.
  1461  	casgstatus(gp, _Gidle, _Gsyscall)
  1462  	gp.m = mp
  1463  	mp.curg = gp
  1464  	mp.locked = _LockInternal
  1465  	mp.lockedg = gp
  1466  	gp.lockedm = mp
  1467  	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
  1468  	if raceenabled {
  1469  		gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
  1470  	}
  1471  	// put on allg for garbage collector
  1472  	allgadd(gp)
  1473  
  1474  	// Add m to the extra list.
  1475  	mnext := lockextra(true)
  1476  	mp.schedlink.set(mnext)
  1477  	unlockextra(mp)
  1478  }
  1479  
  1480  // dropm is called when a cgo callback has called needm but is now
  1481  // done with the callback and returning back into the non-Go thread.
  1482  // It puts the current m back onto the extra list.
  1483  //
  1484  // The main expense here is the call to signalstack to release the
  1485  // m's signal stack, and then the call to needm on the next callback
  1486  // from this thread. It is tempting to try to save the m for next time,
  1487  // which would eliminate both these costs, but there might not be
  1488  // a next time: the current thread (which Go does not control) might exit.
  1489  // If we saved the m for that thread, there would be an m leak each time
  1490  // such a thread exited. Instead, we acquire and release an m on each
  1491  // call. These should typically not be scheduling operations, just a few
  1492  // atomics, so the cost should be small.
  1493  //
  1494  // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
  1495  // variable using pthread_key_create. Unlike the pthread keys we already use
  1496  // on OS X, this dummy key would never be read by Go code. It would exist
  1497  // only so that we could register a thread-exit-time destructor.
  1498  // That destructor would put the m back onto the extra list.
  1499  // This is purely a performance optimization. The current version,
  1500  // in which dropm happens on each cgo call, is still correct too.
  1501  // We may have to keep the current version on systems with cgo
  1502  // but without pthreads, like Windows.
  1503  func dropm() {
  1504  	// Clear m and g, and return m to the extra list.
  1505  	// After the call to setg we can only call nosplit functions
  1506  	// with no pointer manipulation.
  1507  	mp := getg().m
  1508  
  1509  	// Block signals before unminit.
  1510  	// Unminit unregisters the signal handling stack (but needs g on some systems).
  1511  	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
  1512  	// It's important not to try to handle a signal between those two steps.
  1513  	sigmask := mp.sigmask
  1514  	sigblock()
  1515  	unminit()
  1516  
  1517  	mnext := lockextra(true)
  1518  	mp.schedlink.set(mnext)
  1519  
  1520  	setg(nil)
  1521  
  1522  	// Commit the release of mp.
  1523  	unlockextra(mp)
  1524  
  1525  	msigrestore(sigmask)
  1526  }
  1527  
  1528  // A helper function for EnsureDropM.
  1529  func getm() uintptr {
  1530  	return uintptr(unsafe.Pointer(getg().m))
  1531  }
  1532  
  1533  var extram uintptr
  1534  var extraMWaiters uint32
  1535  
  1536  // lockextra locks the extra list and returns the list head.
  1537  // The caller must unlock the list by storing a new list head
  1538  // to extram. If nilokay is true, then lockextra will
  1539  // return a nil list head if that's what it finds. If nilokay is false,
  1540  // lockextra will keep waiting until the list head is no longer nil.
  1541  //go:nosplit
  1542  func lockextra(nilokay bool) *m {
  1543  	const locked = 1
  1544  
  1545  	incr := false
  1546  	for {
  1547  		old := atomic.Loaduintptr(&extram)
  1548  		if old == locked {
  1549  			yield := osyield
  1550  			yield()
  1551  			continue
  1552  		}
  1553  		if old == 0 && !nilokay {
  1554  			if !incr {
  1555  				// Add 1 to the number of threads
  1556  				// waiting for an M.
  1557  				// This is cleared by newextram.
  1558  				atomic.Xadd(&extraMWaiters, 1)
  1559  				incr = true
  1560  			}
  1561  			usleep(1)
  1562  			continue
  1563  		}
  1564  		if atomic.Casuintptr(&extram, old, locked) {
  1565  			return (*m)(unsafe.Pointer(old))
  1566  		}
  1567  		yield := osyield
  1568  		yield()
  1569  		continue
  1570  	}
  1571  }
  1572  
  1573  //go:nosplit
  1574  func unlockextra(mp *m) {
  1575  	atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
  1576  }
  1577  
  1578  // Create a new m. It will start off with a call to fn, or else the scheduler.
  1579  // fn needs to be static and not a heap-allocated closure.
  1580  // May run with m.p==nil, so write barriers are not allowed.
  1581  //go:nowritebarrierrec
  1582  func newm(fn func(), _p_ *p) {
  1583  	mp := allocm(_p_, fn)
  1584  	mp.nextp.set(_p_)
  1585  	mp.sigmask = initSigmask
  1586  	if iscgo {
  1587  		var ts cgothreadstart
  1588  		if _cgo_thread_start == nil {
  1589  			throw("_cgo_thread_start missing")
  1590  		}
  1591  		ts.g.set(mp.g0)
  1592  		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
  1593  		ts.fn = unsafe.Pointer(funcPC(mstart))
  1594  		if msanenabled {
  1595  			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
  1596  		}
  1597  		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
  1598  		return
  1599  	}
  1600  	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
  1601  }
  1602  
  1603  // Stops execution of the current m until new work is available.
  1604  // Returns with acquired P.
  1605  func stopm() {
  1606  	_g_ := getg()
  1607  
  1608  	if _g_.m.locks != 0 {
  1609  		throw("stopm holding locks")
  1610  	}
  1611  	if _g_.m.p != 0 {
  1612  		throw("stopm holding p")
  1613  	}
  1614  	if _g_.m.spinning {
  1615  		throw("stopm spinning")
  1616  	}
  1617  
  1618  retry:
  1619  	lock(&sched.lock)
  1620  	mput(_g_.m)
  1621  	unlock(&sched.lock)
  1622  	notesleep(&_g_.m.park)
  1623  	noteclear(&_g_.m.park)
  1624  	if _g_.m.helpgc != 0 {
  1625  		gchelper()
  1626  		_g_.m.helpgc = 0
  1627  		_g_.m.mcache = nil
  1628  		_g_.m.p = 0
  1629  		goto retry
  1630  	}
  1631  	acquirep(_g_.m.nextp.ptr())
  1632  	_g_.m.nextp = 0
  1633  }
  1634  
  1635  func mspinning() {
  1636  	// startm's caller incremented nmspinning. Set the new M's spinning.
  1637  	getg().m.spinning = true
  1638  }
  1639  
  1640  // Schedules some M to run the p (creates an M if necessary).
  1641  // If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
  1642  // May run with m.p==nil, so write barriers are not allowed.
  1643  // If spinning is set, the caller has incremented nmspinning and startm will
  1644  // either decrement nmspinning or set m.spinning in the newly started M.
  1645  //go:nowritebarrierrec
  1646  func startm(_p_ *p, spinning bool) {
  1647  	lock(&sched.lock)
  1648  	if _p_ == nil {
  1649  		_p_ = pidleget()
  1650  		if _p_ == nil {
  1651  			unlock(&sched.lock)
  1652  			if spinning {
  1653  				// The caller incremented nmspinning, but there are no idle Ps,
  1654  				// so it's okay to just undo the increment and give up.
  1655  				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  1656  					throw("startm: negative nmspinning")
  1657  				}
  1658  			}
  1659  			return
  1660  		}
  1661  	}
  1662  	mp := mget()
  1663  	unlock(&sched.lock)
  1664  	if mp == nil {
  1665  		var fn func()
  1666  		if spinning {
  1667  			// The caller incremented nmspinning, so set m.spinning in the new M.
  1668  			fn = mspinning
  1669  		}
  1670  		newm(fn, _p_)
  1671  		return
  1672  	}
  1673  	if mp.spinning {
  1674  		throw("startm: m is spinning")
  1675  	}
  1676  	if mp.nextp != 0 {
  1677  		throw("startm: m has p")
  1678  	}
  1679  	if spinning && !runqempty(_p_) {
  1680  		throw("startm: p has runnable gs")
  1681  	}
  1682  	// The caller incremented nmspinning, so set m.spinning in the new M.
  1683  	mp.spinning = spinning
  1684  	mp.nextp.set(_p_)
  1685  	notewakeup(&mp.park)
  1686  }
  1687  
  1688  // Hands off P from syscall or locked M.
  1689  // Always runs without a P, so write barriers are not allowed.
  1690  //go:nowritebarrierrec
  1691  func handoffp(_p_ *p) {
  1692  	// handoffp must start an M in any situation where
  1693  	// findrunnable would return a G to run on _p_.
  1694  
  1695  	// if it has local work, start it straight away
  1696  	if !runqempty(_p_) || sched.runqsize != 0 {
  1697  		startm(_p_, false)
  1698  		return
  1699  	}
  1700  	// if it has GC work, start it straight away
  1701  	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
  1702  		startm(_p_, false)
  1703  		return
  1704  	}
  1705  	// no local work, check that there are no spinning/idle M's,
  1706  	// otherwise our help is not required
  1707  	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
  1708  		startm(_p_, true)
  1709  		return
  1710  	}
  1711  	lock(&sched.lock)
  1712  	if sched.gcwaiting != 0 {
  1713  		_p_.status = _Pgcstop
  1714  		sched.stopwait--
  1715  		if sched.stopwait == 0 {
  1716  			notewakeup(&sched.stopnote)
  1717  		}
  1718  		unlock(&sched.lock)
  1719  		return
  1720  	}
  1721  	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
  1722  		sched.safePointFn(_p_)
  1723  		sched.safePointWait--
  1724  		if sched.safePointWait == 0 {
  1725  			notewakeup(&sched.safePointNote)
  1726  		}
  1727  	}
  1728  	if sched.runqsize != 0 {
  1729  		unlock(&sched.lock)
  1730  		startm(_p_, false)
  1731  		return
  1732  	}
  1733  	// If this is the last running P and nobody is polling the network,
  1734  	// we need to wake up another M to poll the network.
  1735  	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
  1736  		unlock(&sched.lock)
  1737  		startm(_p_, false)
  1738  		return
  1739  	}
  1740  	pidleput(_p_)
  1741  	unlock(&sched.lock)
  1742  }
  1743  
  1744  // Tries to add one more P to execute G's.
  1745  // Called when a G is made runnable (newproc, ready).
  1746  func wakep() {
  1747  	// be conservative about spinning threads
  1748  	if !atomic.Cas(&sched.nmspinning, 0, 1) {
  1749  		return
  1750  	}
  1751  	startm(nil, true)
  1752  }
  1753  
  1754  // Stops execution of the current m that is locked to a g until the g is runnable again.
  1755  // Returns with acquired P.
  1756  func stoplockedm() {
  1757  	_g_ := getg()
  1758  
  1759  	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
  1760  		throw("stoplockedm: inconsistent locking")
  1761  	}
  1762  	if _g_.m.p != 0 {
  1763  		// Schedule another M to run this p.
  1764  		_p_ := releasep()
  1765  		handoffp(_p_)
  1766  	}
  1767  	incidlelocked(1)
  1768  	// Wait until another thread schedules lockedg again.
  1769  	notesleep(&_g_.m.park)
  1770  	noteclear(&_g_.m.park)
  1771  	status := readgstatus(_g_.m.lockedg)
  1772  	if status&^_Gscan != _Grunnable {
  1773  		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
  1774  		dumpgstatus(_g_)
  1775  		throw("stoplockedm: not runnable")
  1776  	}
  1777  	acquirep(_g_.m.nextp.ptr())
  1778  	_g_.m.nextp = 0
  1779  }
  1780  
  1781  // Schedules the locked m to run the locked gp.
  1782  // May run during STW, so write barriers are not allowed.
  1783  //go:nowritebarrierrec
  1784  func startlockedm(gp *g) {
  1785  	_g_ := getg()
  1786  
  1787  	mp := gp.lockedm
  1788  	if mp == _g_.m {
  1789  		throw("startlockedm: locked to me")
  1790  	}
  1791  	if mp.nextp != 0 {
  1792  		throw("startlockedm: m has p")
  1793  	}
  1794  	// directly handoff current P to the locked m
  1795  	incidlelocked(-1)
  1796  	_p_ := releasep()
  1797  	mp.nextp.set(_p_)
  1798  	notewakeup(&mp.park)
  1799  	stopm()
  1800  }
  1801  
  1802  // Stops the current m for stopTheWorld.
  1803  // Returns when the world is restarted.
  1804  func gcstopm() {
  1805  	_g_ := getg()
  1806  
  1807  	if sched.gcwaiting == 0 {
  1808  		throw("gcstopm: not waiting for gc")
  1809  	}
  1810  	if _g_.m.spinning {
  1811  		_g_.m.spinning = false
  1812  		// OK to just drop nmspinning here,
  1813  		// startTheWorld will unpark threads as necessary.
  1814  		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  1815  			throw("gcstopm: negative nmspinning")
  1816  		}
  1817  	}
  1818  	_p_ := releasep()
  1819  	lock(&sched.lock)
  1820  	_p_.status = _Pgcstop
  1821  	sched.stopwait--
  1822  	if sched.stopwait == 0 {
  1823  		notewakeup(&sched.stopnote)
  1824  	}
  1825  	unlock(&sched.lock)
  1826  	stopm()
  1827  }
  1828  
  1829  // Schedules gp to run on the current M.
  1830  // If inheritTime is true, gp inherits the remaining time in the
  1831  // current time slice. Otherwise, it starts a new time slice.
  1832  // Never returns.
  1833  //
  1834  // Write barriers are allowed because this is called immediately after
  1835  // acquiring a P in several places.
  1836  //
  1837  //go:yeswritebarrierrec
  1838  func execute(gp *g, inheritTime bool) {
  1839  	_g_ := getg()
  1840  
  1841  	casgstatus(gp, _Grunnable, _Grunning)
  1842  	gp.waitsince = 0
  1843  	gp.preempt = false
  1844  	gp.stackguard0 = gp.stack.lo + _StackGuard
  1845  	if !inheritTime {
  1846  		_g_.m.p.ptr().schedtick++
  1847  	}
  1848  	_g_.m.curg = gp
  1849  	gp.m = _g_.m
  1850  
  1851  	// Check whether the profiler needs to be turned on or off.
  1852  	hz := sched.profilehz
  1853  	if _g_.m.profilehz != hz {
  1854  		resetcpuprofiler(hz)
  1855  	}
  1856  
  1857  	if trace.enabled {
  1858  		// GoSysExit has to happen when we have a P, but before GoStart.
  1859  		// So we emit it here.
  1860  		if gp.syscallsp != 0 && gp.sysblocktraced {
  1861  			traceGoSysExit(gp.sysexitticks)
  1862  		}
  1863  		traceGoStart()
  1864  	}
  1865  
  1866  	gogo(&gp.sched)
  1867  }
  1868  
  1869  // Finds a runnable goroutine to execute.
  1870  // Tries to steal from other P's, get g from global queue, poll network.
  1871  func findrunnable() (gp *g, inheritTime bool) {
  1872  	_g_ := getg()
  1873  
  1874  	// The conditions here and in handoffp must agree: if
  1875  	// findrunnable would return a G to run, handoffp must start
  1876  	// an M.
  1877  
  1878  top:
  1879  	_p_ := _g_.m.p.ptr()
  1880  	if sched.gcwaiting != 0 {
  1881  		gcstopm()
  1882  		goto top
  1883  	}
  1884  	if _p_.runSafePointFn != 0 {
  1885  		runSafePointFn()
  1886  	}
  1887  	if fingwait && fingwake {
  1888  		if gp := wakefing(); gp != nil {
  1889  			ready(gp, 0, true)
  1890  		}
  1891  	}
  1892  
  1893  	// local runq
  1894  	if gp, inheritTime := runqget(_p_); gp != nil {
  1895  		return gp, inheritTime
  1896  	}
  1897  
  1898  	// global runq
  1899  	if sched.runqsize != 0 {
  1900  		lock(&sched.lock)
  1901  		gp := globrunqget(_p_, 0)
  1902  		unlock(&sched.lock)
  1903  		if gp != nil {
  1904  			return gp, false
  1905  		}
  1906  	}
  1907  
  1908  	// Poll network.
  1909  	// This netpoll is only an optimization before we resort to stealing.
  1910  	// We can safely skip it if there is already a thread blocked in netpoll.
  1911  	// If there is any kind of logical race with that blocked thread
  1912  	// (e.g. it has already returned from netpoll, but has not set lastpoll yet),
  1913  	// this thread will do blocking netpoll below anyway.
  1914  	if netpollinited() && sched.lastpoll != 0 {
  1915  		if gp := netpoll(false); gp != nil { // non-blocking
  1916  			// netpoll returns list of goroutines linked by schedlink.
  1917  			injectglist(gp.schedlink.ptr())
  1918  			casgstatus(gp, _Gwaiting, _Grunnable)
  1919  			if trace.enabled {
  1920  				traceGoUnpark(gp, 0)
  1921  			}
  1922  			return gp, false
  1923  		}
  1924  	}
  1925  
  1926  	// Steal work from other P's.
  1927  	procs := uint32(gomaxprocs)
  1928  	if atomic.Load(&sched.npidle) == procs-1 {
  1929  		// Either GOMAXPROCS=1 or everybody, except for us, is idle already.
  1930  		// New work can appear from returning syscall/cgocall, network or timers.
  1931  	// None of those submit to local run queues, so there is no point in stealing.
  1932  		goto stop
  1933  	}
  1934  	// If number of spinning M's >= number of busy P's, block.
  1935  	// This is necessary to prevent excessive CPU consumption
  1936  	// when GOMAXPROCS>>1 but the program parallelism is low.
  1937  	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
  1938  		goto stop
  1939  	}
  1940  	if !_g_.m.spinning {
  1941  		_g_.m.spinning = true
  1942  		atomic.Xadd(&sched.nmspinning, 1)
  1943  	}
  1944  	for i := 0; i < 4; i++ {
  1945  		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
  1946  			if sched.gcwaiting != 0 {
  1947  				goto top
  1948  			}
  1949  			stealRunNextG := i > 2 // first look for ready queues with more than 1 g
  1950  			if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
  1951  				return gp, false
  1952  			}
  1953  		}
  1954  	}
  1955  
  1956  stop:
  1957  
  1958  	// We have nothing to do. If we're in the GC mark phase, can
  1959  	// safely scan and blacken objects, and have work to do, run
  1960  	// idle-time marking rather than give up the P.
  1961  	if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
  1962  		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
  1963  		gp := _p_.gcBgMarkWorker.ptr()
  1964  		casgstatus(gp, _Gwaiting, _Grunnable)
  1965  		if trace.enabled {
  1966  			traceGoUnpark(gp, 0)
  1967  		}
  1968  		return gp, false
  1969  	}
  1970  
  1971  	// return P and block
  1972  	lock(&sched.lock)
  1973  	if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
  1974  		unlock(&sched.lock)
  1975  		goto top
  1976  	}
  1977  	if sched.runqsize != 0 {
  1978  		gp := globrunqget(_p_, 0)
  1979  		unlock(&sched.lock)
  1980  		return gp, false
  1981  	}
  1982  	if releasep() != _p_ {
  1983  		throw("findrunnable: wrong p")
  1984  	}
  1985  	pidleput(_p_)
  1986  	unlock(&sched.lock)
  1987  
  1988  	// Delicate dance: thread transitions from spinning to non-spinning state,
  1989  	// potentially concurrently with submission of new goroutines. We must
  1990  	// drop nmspinning first and then check all per-P queues again (with
  1991  	// #StoreLoad memory barrier in between). If we do it the other way around,
  1992  	// another thread can submit a goroutine after we've checked all run queues
  1993  	// but before we drop nmspinning; as a result, nobody will unpark a thread
  1994  	// to run the goroutine.
  1995  	// If we discover new work below, we need to restore m.spinning as a signal
  1996  	// for resetspinning to unpark a new worker thread (because there can be more
  1997  	// than one starving goroutine). However, if after discovering new work
  1998  	// we also observe no idle Ps, it is OK to just park the current thread:
  1999  	// the system is fully loaded so no spinning threads are required.
  2000  	// Also see "Worker thread parking/unparking" comment at the top of the file.
  2001  	wasSpinning := _g_.m.spinning
  2002  	if _g_.m.spinning {
  2003  		_g_.m.spinning = false
  2004  		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
  2005  			throw("findrunnable: negative nmspinning")
  2006  		}
  2007  	}
  2008  
  2009  	// check all runqueues once again
  2010  	for i := 0; i < int(gomaxprocs); i++ {
  2011  		_p_ := allp[i]
  2012  		if _p_ != nil && !runqempty(_p_) {
  2013  			lock(&sched.lock)
  2014  			_p_ = pidleget()
  2015  			unlock(&sched.lock)
  2016  			if _p_ != nil {
  2017  				acquirep(_p_)
  2018  				if wasSpinning {
  2019  					_g_.m.spinning = true
  2020  					atomic.Xadd(&sched.nmspinning, 1)
  2021  				}
  2022  				goto top
  2023  			}
  2024  			break
  2025  		}
  2026  	}
  2027  
  2028  	// poll network
  2029  	if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
  2030  		if _g_.m.p != 0 {
  2031  			throw("findrunnable: netpoll with p")
  2032  		}
  2033  		if _g_.m.spinning {
  2034  			throw("findrunnable: netpoll with spinning")
  2035  		}
  2036  		gp := netpoll(true) // block until new work is available
  2037  		atomic.Store64(&sched.lastpoll, uint64(nanotime()))
  2038  		if gp != nil {
  2039  			lock(&sched.lock)
  2040  			_p_ = pidleget()
  2041  			unlock(&sched.lock)
  2042  			if _p_ != nil {
  2043  				acquirep(_p_)
  2044  				injectglist(gp.schedlink.ptr())
  2045  				casgstatus(gp, _Gwaiting, _Grunnable)
  2046  				if trace.enabled {
  2047  					traceGoUnpark(gp, 0)
  2048  				}
  2049  				return gp, false
  2050  			}
  2051  			injectglist(gp)
  2052  		}
  2053  	}
  2054  	stopm()
  2055  	goto top
  2056  }
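// Illustrative sketch: the "delicate dance" described inside findrunnable above is a
// Dekker-style store/load protocol between a goroutine submitter and a parking worker.
// In simplified pseudocode (names match the surrounding code, ordering is the point):
//
//	submitter (newproc/ready):
//		1. runqput(_p_, gp)                   // store: publish the work
//		2. if nmspinning == 0 { wakep() }     // load:  check for spinning workers
//
//	parking worker (findrunnable):
//		1. atomic.Xadd(&sched.nmspinning, -1) // store: drop the spinning count
//		2. recheck all per-P run queues       // load:  look for work once more
//
// Each side stores first and loads second, and the atomic add supplies the #StoreLoad
// barrier, so at least one side must observe the other's store; a freshly submitted
// goroutine therefore cannot be missed by every thread.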
  2057  
  2058  func resetspinning() {
  2059  	_g_ := getg()
  2060  	if !_g_.m.spinning {
  2061  		throw("resetspinning: not a spinning m")
  2062  	}
  2063  	_g_.m.spinning = false
  2064  	nmspinning := atomic.Xadd(&sched.nmspinning, -1)
  2065  	if int32(nmspinning) < 0 {
  2066  		throw("findrunnable: negative nmspinning")
  2067  	}
  2068  	// M wakeup policy is deliberately somewhat conservative, so check if we
  2069  	// need to wakeup another P here. See "Worker thread parking/unparking"
  2070  	// comment at the top of the file for details.
  2071  	if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
  2072  		wakep()
  2073  	}
  2074  }
  2075  
  2076  // Injects the list of runnable G's into the scheduler.
  2077  // Can run concurrently with GC.
  2078  func injectglist(glist *g) {
  2079  	if glist == nil {
  2080  		return
  2081  	}
  2082  	if trace.enabled {
  2083  		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
  2084  			traceGoUnpark(gp, 0)
  2085  		}
  2086  	}
  2087  	lock(&sched.lock)
  2088  	var n int
  2089  	for n = 0; glist != nil; n++ {
  2090  		gp := glist
  2091  		glist = gp.schedlink.ptr()
  2092  		casgstatus(gp, _Gwaiting, _Grunnable)
  2093  		globrunqput(gp)
  2094  	}
  2095  	unlock(&sched.lock)
  2096  	for ; n != 0 && sched.npidle != 0; n-- {
  2097  		startm(nil, false)
  2098  	}
  2099  }
  2100  
  2101  // One round of scheduler: find a runnable goroutine and execute it.
  2102  // Never returns.
  2103  func schedule() {
  2104  	_g_ := getg()
  2105  
  2106  	if _g_.m.locks != 0 {
  2107  		throw("schedule: holding locks")
  2108  	}
  2109  
  2110  	if _g_.m.lockedg != nil {
  2111  		stoplockedm()
  2112  		execute(_g_.m.lockedg, false) // Never returns.
  2113  	}
  2114  
  2115  top:
  2116  	if sched.gcwaiting != 0 {
  2117  		gcstopm()
  2118  		goto top
  2119  	}
  2120  	if _g_.m.p.ptr().runSafePointFn != 0 {
  2121  		runSafePointFn()
  2122  	}
  2123  
  2124  	var gp *g
  2125  	var inheritTime bool
  2126  	if trace.enabled || trace.shutdown {
  2127  		gp = traceReader()
  2128  		if gp != nil {
  2129  			casgstatus(gp, _Gwaiting, _Grunnable)
  2130  			traceGoUnpark(gp, 0)
  2131  		}
  2132  	}
  2133  	if gp == nil && gcBlackenEnabled != 0 {
  2134  		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
  2135  	}
  2136  	if gp == nil {
  2137  		// Check the global runnable queue once in a while to ensure fairness.
  2138  		// Otherwise two goroutines can completely occupy the local runqueue
  2139  		// by constantly respawning each other.
  2140  		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
  2141  			lock(&sched.lock)
  2142  			gp = globrunqget(_g_.m.p.ptr(), 1)
  2143  			unlock(&sched.lock)
  2144  		}
  2145  	}
  2146  	if gp == nil {
  2147  		gp, inheritTime = runqget(_g_.m.p.ptr())
  2148  		if gp != nil && _g_.m.spinning {
  2149  			throw("schedule: spinning with local work")
  2150  		}
  2151  	}
  2152  	if gp == nil {
  2153  		gp, inheritTime = findrunnable() // blocks until work is available
  2154  	}
  2155  
  2156  	// This thread is going to run a goroutine and is not spinning anymore,
  2157  	// so if it was marked as spinning we need to reset it now and potentially
  2158  	// start a new spinning M.
  2159  	if _g_.m.spinning {
  2160  		resetspinning()
  2161  	}
  2162  
  2163  	if gp.lockedm != nil {
  2164  		// Hands off own p to the locked m,
  2165  		// then blocks waiting for a new p.
  2166  		startlockedm(gp)
  2167  		goto top
  2168  	}
  2169  
  2170  	execute(gp, inheritTime)
  2171  }
  2172  
  2173  // dropg removes the association between m and the current goroutine m->curg (gp for short).
  2174  // Typically a caller sets gp's status away from Grunning and then
  2175  // immediately calls dropg to finish the job. The caller is also responsible
  2176  // for arranging that gp will be restarted using ready at an
  2177  // appropriate time. After calling dropg and arranging for gp to be
  2178  // readied later, the caller can do other work but eventually should
  2179  // call schedule to restart the scheduling of goroutines on this m.
  2180  func dropg() {
  2181  	_g_ := getg()
  2182  
  2183  	setMNoWB(&_g_.m.curg.m, nil)
  2184  	setGNoWB(&_g_.m.curg, nil)
  2185  }
  2186  
  2187  func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  2188  	unlock((*mutex)(lock))
  2189  	return true
  2190  }
  2191  
  2192  // park continuation on g0.
  2193  func park_m(gp *g) {
  2194  	_g_ := getg()
  2195  
  2196  	if trace.enabled {
  2197  		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
  2198  	}
  2199  
  2200  	casgstatus(gp, _Grunning, _Gwaiting)
  2201  	dropg()
  2202  
  2203  	if _g_.m.waitunlockf != nil {
  2204  		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
  2205  		ok := fn(gp, _g_.m.waitlock)
  2206  		_g_.m.waitunlockf = nil
  2207  		_g_.m.waitlock = nil
  2208  		if !ok {
  2209  			if trace.enabled {
  2210  				traceGoUnpark(gp, 2)
  2211  			}
  2212  			casgstatus(gp, _Gwaiting, _Grunnable)
  2213  			execute(gp, true) // Schedule it back, never returns.
  2214  		}
  2215  	}
  2216  	schedule()
  2217  }
  2218  
  2219  func goschedImpl(gp *g) {
  2220  	status := readgstatus(gp)
  2221  	if status&^_Gscan != _Grunning {
  2222  		dumpgstatus(gp)
  2223  		throw("bad g status")
  2224  	}
  2225  	casgstatus(gp, _Grunning, _Grunnable)
  2226  	dropg()
  2227  	lock(&sched.lock)
  2228  	globrunqput(gp)
  2229  	unlock(&sched.lock)
  2230  
  2231  	schedule()
  2232  }
  2233  
  2234  // Gosched continuation on g0.
  2235  func gosched_m(gp *g) {
  2236  	if trace.enabled {
  2237  		traceGoSched()
  2238  	}
  2239  	goschedImpl(gp)
  2240  }
  2241  
  2242  func gopreempt_m(gp *g) {
  2243  	if trace.enabled {
  2244  		traceGoPreempt()
  2245  	}
  2246  	goschedImpl(gp)
  2247  }
  2248  
  2249  // Finishes execution of the current goroutine.
  2250  func goexit1() {
  2251  	if raceenabled {
  2252  		racegoend()
  2253  	}
  2254  	if trace.enabled {
  2255  		traceGoEnd()
  2256  	}
  2257  	mcall(goexit0)
  2258  }
  2259  
  2260  // goexit continuation on g0.
  2261  func goexit0(gp *g) {
  2262  	_g_ := getg()
  2263  
  2264  	casgstatus(gp, _Grunning, _Gdead)
  2265  	if isSystemGoroutine(gp) {
  2266  		atomic.Xadd(&sched.ngsys, -1)
  2267  	}
  2268  	gp.m = nil
  2269  	gp.lockedm = nil
  2270  	_g_.m.lockedg = nil
  2271  	gp.paniconfault = false
  2272  	gp._defer = nil // should be nil already, but just in case.
  2273  	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
  2274  	gp.writebuf = nil
  2275  	gp.waitreason = ""
  2276  	gp.param = nil
  2277  
  2278  	// Note that gp's stack scan is now "valid" because it has no
  2279  	// stack. We could dequeueRescan, but that takes a lock and
  2280  	// isn't really necessary.
  2281  	gp.gcscanvalid = true
  2282  	dropg()
  2283  
  2284  	if _g_.m.locked&^_LockExternal != 0 {
  2285  		print("invalid m->locked = ", _g_.m.locked, "\n")
  2286  		throw("internal lockOSThread error")
  2287  	}
  2288  	_g_.m.locked = 0
  2289  	gfput(_g_.m.p.ptr(), gp)
  2290  	schedule()
  2291  }
  2292  
  2293  // save updates getg().sched to refer to pc and sp so that a following
  2294  // gogo will restore pc and sp.
  2295  //
  2296  // save must not have write barriers because invoking a write barrier
  2297  // can clobber getg().sched.
  2298  //
  2299  //go:nosplit
  2300  //go:nowritebarrierrec
  2301  func save(pc, sp uintptr) {
  2302  	_g_ := getg()
  2303  
  2304  	_g_.sched.pc = pc
  2305  	_g_.sched.sp = sp
  2306  	_g_.sched.lr = 0
  2307  	_g_.sched.ret = 0
  2308  	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
  2309  	// We need to ensure ctxt is zero, but can't have a write
  2310  	// barrier here. However, it should always already be zero.
  2311  	// Assert that.
  2312  	if _g_.sched.ctxt != nil {
  2313  		badctxt()
  2314  	}
  2315  }
  2316  
  2317  // The goroutine g is about to enter a system call.
  2318  // Record that it's not using the cpu anymore.
  2319  // This is called only from the go syscall library and cgocall,
  2320  // not from the low-level system calls used by the runtime.
  2321  //
  2322  // Entersyscall cannot split the stack: the gosave must
  2323  // make g->sched refer to the caller's stack segment, because
  2324  // entersyscall is going to return immediately after.
  2325  //
  2326  // Nothing entersyscall calls can split the stack either.
  2327  // We cannot safely move the stack during an active call to syscall,
  2328  // because we do not know which of the uintptr arguments are
  2329  // really pointers (back into the stack).
  2330  // In practice, this means that we make the fast path run through
  2331  // entersyscall doing no-split things, and the slow path has to use systemstack
  2332  // to run bigger things on the system stack.
  2333  //
  2334  // reentersyscall is the entry point used by cgo callbacks, where explicitly
  2335  // saved SP and PC are restored. This is needed when exitsyscall will be called
  2336  // from a function further up in the call stack than the parent, as g->syscallsp
  2337  // must always point to a valid stack frame. entersyscall below is the normal
  2338  // entry point for syscalls, which obtains the SP and PC from the caller.
  2339  //
  2340  // Syscall tracing:
  2341  // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
  2342  // If the syscall does not block, that is it, we do not emit any other events.
  2343  // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
  2344  // when syscall returns we emit traceGoSysExit and when the goroutine starts running
  2345  // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
  2346  // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
  2347  // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
  2348  // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
  2349  // and we wait for the increment before emitting traceGoSysExit.
  2350  // Note that the increment is done even if tracing is not enabled,
  2351  // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
  2352  //
  2353  //go:nosplit
  2354  func reentersyscall(pc, sp uintptr) {
  2355  	_g_ := getg()
  2356  
  2357  	// Disable preemption because during this function g is in Gsyscall status,
  2358  	// but can have inconsistent g->sched, do not let GC observe it.
  2359  	_g_.m.locks++
  2360  
  2361  	// Entersyscall must not call any function that might split/grow the stack.
  2362  	// (See details in comment above.)
  2363  	// Catch calls that might, by replacing the stack guard with something that
  2364  	// will trip any stack check and leaving a flag to tell newstack to die.
  2365  	_g_.stackguard0 = stackPreempt
  2366  	_g_.throwsplit = true
  2367  
  2368  	// Leave SP around for GC and traceback.
  2369  	save(pc, sp)
  2370  	_g_.syscallsp = sp
  2371  	_g_.syscallpc = pc
  2372  	casgstatus(_g_, _Grunning, _Gsyscall)
  2373  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2374  		systemstack(func() {
  2375  			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2376  			throw("entersyscall")
  2377  		})
  2378  	}
  2379  
  2380  	if trace.enabled {
  2381  		systemstack(traceGoSysCall)
  2382  		// systemstack itself clobbers g.sched.{pc,sp} and we might
  2383  		// need them later when the G is genuinely blocked in a
  2384  		// syscall
  2385  		save(pc, sp)
  2386  	}
  2387  
  2388  	if atomic.Load(&sched.sysmonwait) != 0 {
  2389  		systemstack(entersyscall_sysmon)
  2390  		save(pc, sp)
  2391  	}
  2392  
  2393  	if _g_.m.p.ptr().runSafePointFn != 0 {
  2394  		// runSafePointFn may stack split if run on this stack
  2395  		systemstack(runSafePointFn)
  2396  		save(pc, sp)
  2397  	}
  2398  
  2399  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2400  	_g_.sysblocktraced = true
  2401  	_g_.m.mcache = nil
  2402  	_g_.m.p.ptr().m = 0
  2403  	atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
  2404  	if sched.gcwaiting != 0 {
  2405  		systemstack(entersyscall_gcwait)
  2406  		save(pc, sp)
  2407  	}
  2408  
  2409  	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
  2410  	// We set _StackGuard to StackPreempt so that the first split stack check calls morestack.
  2411  	// Morestack detects this case and throws.
  2412  	_g_.stackguard0 = stackPreempt
  2413  	_g_.m.locks--
  2414  }
  2415  
  2416  // Standard syscall entry used by the go syscall library and normal cgo calls.
  2417  //go:nosplit
  2418  func entersyscall(dummy int32) {
  2419  	reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  2420  }
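// Illustrative sketch: entersyscall and exitsyscall are the brackets that the syscall
// package and cgocall place around a raw kernel entry. The real wrappers are assembly;
// rawSyscall below is hypothetical and only makes the bracketing explicit:
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//		entersyscall(0)                            // G enters _Gsyscall; sysmon may retake the P
//		r1, r2, err = rawSyscall(trap, a1, a2, a3) // hypothetical raw kernel entry
//		exitsyscall(0)                             // fast path reacquires the old P, else reschedule
//		return
//	}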
  2421  
  2422  func entersyscall_sysmon() {
  2423  	lock(&sched.lock)
  2424  	if atomic.Load(&sched.sysmonwait) != 0 {
  2425  		atomic.Store(&sched.sysmonwait, 0)
  2426  		notewakeup(&sched.sysmonnote)
  2427  	}
  2428  	unlock(&sched.lock)
  2429  }
  2430  
  2431  func entersyscall_gcwait() {
  2432  	_g_ := getg()
  2433  	_p_ := _g_.m.p.ptr()
  2434  
  2435  	lock(&sched.lock)
  2436  	if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
  2437  		if trace.enabled {
  2438  			traceGoSysBlock(_p_)
  2439  			traceProcStop(_p_)
  2440  		}
  2441  		_p_.syscalltick++
  2442  		if sched.stopwait--; sched.stopwait == 0 {
  2443  			notewakeup(&sched.stopnote)
  2444  		}
  2445  	}
  2446  	unlock(&sched.lock)
  2447  }
  2448  
  2449  // The same as entersyscall(), but with a hint that the syscall is blocking.
  2450  //go:nosplit
  2451  func entersyscallblock(dummy int32) {
  2452  	_g_ := getg()
  2453  
  2454  	_g_.m.locks++ // see comment in entersyscall
  2455  	_g_.throwsplit = true
  2456  	_g_.stackguard0 = stackPreempt // see comment in entersyscall
  2457  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2458  	_g_.sysblocktraced = true
  2459  	_g_.m.p.ptr().syscalltick++
  2460  
  2461  	// Leave SP around for GC and traceback.
  2462  	pc := getcallerpc(unsafe.Pointer(&dummy))
  2463  	sp := getcallersp(unsafe.Pointer(&dummy))
  2464  	save(pc, sp)
  2465  	_g_.syscallsp = _g_.sched.sp
  2466  	_g_.syscallpc = _g_.sched.pc
  2467  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2468  		sp1 := sp
  2469  		sp2 := _g_.sched.sp
  2470  		sp3 := _g_.syscallsp
  2471  		systemstack(func() {
  2472  			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2473  			throw("entersyscallblock")
  2474  		})
  2475  	}
  2476  	casgstatus(_g_, _Grunning, _Gsyscall)
  2477  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2478  		systemstack(func() {
  2479  			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2480  			throw("entersyscallblock")
  2481  		})
  2482  	}
  2483  
  2484  	systemstack(entersyscallblock_handoff)
  2485  
  2486  	// Resave for traceback during blocked call.
  2487  	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  2488  
  2489  	_g_.m.locks--
  2490  }
  2491  
  2492  func entersyscallblock_handoff() {
  2493  	if trace.enabled {
  2494  		traceGoSysCall()
  2495  		traceGoSysBlock(getg().m.p.ptr())
  2496  	}
  2497  	handoffp(releasep())
  2498  }
  2499  
  2500  // The goroutine g exited its system call.
  2501  // Arrange for it to run on a cpu again.
  2502  // This is called only from the go syscall library, not
  2503  // from the low-level system calls used by the runtime.
  2504  //
  2505  // Write barriers are not allowed because our P may have been stolen.
  2506  //
  2507  //go:nosplit
  2508  //go:nowritebarrierrec
  2509  func exitsyscall(dummy int32) {
  2510  	_g_ := getg()
  2511  
  2512  	_g_.m.locks++ // see comment in entersyscall
  2513  	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
  2514  		// throw calls print which may try to grow the stack,
  2515  		// but throwsplit == true so the stack can not be grown;
  2516  		// use systemstack to avoid that possible problem.
  2517  		systemstack(func() {
  2518  			throw("exitsyscall: syscall frame is no longer valid")
  2519  		})
  2520  	}
  2521  
  2522  	_g_.waitsince = 0
  2523  	oldp := _g_.m.p.ptr()
  2524  	if exitsyscallfast() {
  2525  		if _g_.m.mcache == nil {
  2526  			throw("lost mcache")
  2527  		}
  2528  		if trace.enabled {
  2529  			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  2530  				systemstack(traceGoStart)
  2531  			}
  2532  		}
  2533  		// There's a cpu for us, so we can run.
  2534  		_g_.m.p.ptr().syscalltick++
  2535  		// We need to cas the status and scan before resuming...
  2536  		casgstatus(_g_, _Gsyscall, _Grunning)
  2537  
  2538  		// Garbage collector isn't running (since we are),
  2539  		// so okay to clear syscallsp.
  2540  		_g_.syscallsp = 0
  2541  		_g_.m.locks--
  2542  		if _g_.preempt {
  2543  			// restore the preemption request in case we've cleared it in newstack
  2544  			_g_.stackguard0 = stackPreempt
  2545  		} else {
  2546  			// otherwise restore the real _StackGuard; we spoiled it in entersyscall/entersyscallblock
  2547  			_g_.stackguard0 = _g_.stack.lo + _StackGuard
  2548  		}
  2549  		_g_.throwsplit = false
  2550  		return
  2551  	}
  2552  
  2553  	_g_.sysexitticks = 0
  2554  	if trace.enabled {
  2555  		// Wait till traceGoSysBlock event is emitted.
  2556  		// This ensures consistency of the trace (the goroutine is started after it is blocked).
  2557  		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
  2558  			osyield()
  2559  		}
  2560  		// We can't trace syscall exit right now because we don't have a P.
  2561  		// Tracing code can invoke write barriers that cannot run without a P.
  2562  		// So instead we remember the syscall exit time and emit the event
  2563  		// in execute when we have a P.
  2564  		_g_.sysexitticks = cputicks()
  2565  	}
  2566  
  2567  	_g_.m.locks--
  2568  
  2569  	// Call the scheduler.
  2570  	mcall(exitsyscall0)
  2571  
  2572  	if _g_.m.mcache == nil {
  2573  		throw("lost mcache")
  2574  	}
  2575  
  2576  	// Scheduler returned, so we're allowed to run now.
  2577  	// Delete the syscallsp information that we left for
  2578  	// the garbage collector during the system call.
  2579  	// Must wait until now because until gosched returns
  2580  	// we don't know for sure that the garbage collector
  2581  	// is not running.
  2582  	_g_.syscallsp = 0
  2583  	_g_.m.p.ptr().syscalltick++
  2584  	_g_.throwsplit = false
  2585  }
  2586  
  2587  //go:nosplit
  2588  func exitsyscallfast() bool {
  2589  	_g_ := getg()
  2590  
  2591  	// Freezetheworld sets stopwait but does not retake P's.
  2592  	if sched.stopwait == freezeStopWait {
  2593  		_g_.m.mcache = nil
  2594  		_g_.m.p = 0
  2595  		return false
  2596  	}
  2597  
  2598  	// Try to re-acquire the last P.
  2599  	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
  2600  		// There's a cpu for us, so we can run.
  2601  		exitsyscallfast_reacquired()
  2602  		return true
  2603  	}
  2604  
  2605  	// Try to get any other idle P.
  2606  	oldp := _g_.m.p.ptr()
  2607  	_g_.m.mcache = nil
  2608  	_g_.m.p = 0
  2609  	if sched.pidle != 0 {
  2610  		var ok bool
  2611  		systemstack(func() {
  2612  			ok = exitsyscallfast_pidle()
  2613  			if ok && trace.enabled {
  2614  				if oldp != nil {
  2615  					// Wait till traceGoSysBlock event is emitted.
  2616  					// This ensures consistency of the trace (the goroutine is started after it is blocked).
  2617  					for oldp.syscalltick == _g_.m.syscalltick {
  2618  						osyield()
  2619  					}
  2620  				}
  2621  				traceGoSysExit(0)
  2622  			}
  2623  		})
  2624  		if ok {
  2625  			return true
  2626  		}
  2627  	}
  2628  	return false
  2629  }
  2630  
  2631  // exitsyscallfast_reacquired is the exitsyscall path on which this G
  2632  // has successfully reacquired the P it was running on before the
  2633  // syscall.
  2634  //
  2635  // This function is allowed to have write barriers because exitsyscall
  2636  // has acquired a P at this point.
  2637  //
  2638  //go:yeswritebarrierrec
  2639  //go:nosplit
  2640  func exitsyscallfast_reacquired() {
  2641  	_g_ := getg()
  2642  	_g_.m.mcache = _g_.m.p.ptr().mcache
  2643  	_g_.m.p.ptr().m.set(_g_.m)
  2644  	if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  2645  		if trace.enabled {
  2646  			// The p was retaken and then entered into a syscall again (since _g_.m.syscalltick has changed).
  2647  			// traceGoSysBlock for this syscall was already emitted,
  2648  			// but here we effectively retake the p from the new syscall running on the same p.
  2649  			systemstack(func() {
  2650  				// Denote blocking of the new syscall.
  2651  				traceGoSysBlock(_g_.m.p.ptr())
  2652  				// Denote completion of the current syscall.
  2653  				traceGoSysExit(0)
  2654  			})
  2655  		}
  2656  		_g_.m.p.ptr().syscalltick++
  2657  	}
  2658  }
  2659  
  2660  func exitsyscallfast_pidle() bool {
  2661  	lock(&sched.lock)
  2662  	_p_ := pidleget()
  2663  	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
  2664  		atomic.Store(&sched.sysmonwait, 0)
  2665  		notewakeup(&sched.sysmonnote)
  2666  	}
  2667  	unlock(&sched.lock)
  2668  	if _p_ != nil {
  2669  		acquirep(_p_)
  2670  		return true
  2671  	}
  2672  	return false
  2673  }
  2674  
  2675  // exitsyscall slow path on g0.
  2676  // Failed to acquire P, enqueue gp as runnable.
  2677  //
  2678  //go:nowritebarrierrec
  2679  func exitsyscall0(gp *g) {
  2680  	_g_ := getg()
  2681  
  2682  	casgstatus(gp, _Gsyscall, _Grunnable)
  2683  	dropg()
  2684  	lock(&sched.lock)
  2685  	_p_ := pidleget()
  2686  	if _p_ == nil {
  2687  		globrunqput(gp)
  2688  	} else if atomic.Load(&sched.sysmonwait) != 0 {
  2689  		atomic.Store(&sched.sysmonwait, 0)
  2690  		notewakeup(&sched.sysmonnote)
  2691  	}
  2692  	unlock(&sched.lock)
  2693  	if _p_ != nil {
  2694  		acquirep(_p_)
  2695  		execute(gp, false) // Never returns.
  2696  	}
  2697  	if _g_.m.lockedg != nil {
  2698  		// Wait until another thread schedules gp and so m again.
  2699  		stoplockedm()
  2700  		execute(gp, false) // Never returns.
  2701  	}
  2702  	stopm()
  2703  	schedule() // Never returns.
  2704  }
  2705  
  2706  func beforefork() {
  2707  	gp := getg().m.curg
  2708  
  2709  	// Fork can hang if preempted with signals frequently enough (see issue 5517).
  2710  	// Ensure that we stay on the same M where we disable profiling.
  2711  	gp.m.locks++
  2712  	if gp.m.profilehz != 0 {
  2713  		resetcpuprofiler(0)
  2714  	}
  2715  
  2716  	// This function is called before fork in syscall package.
  2717  	// Code between fork and exec must not allocate memory nor even try to grow stack.
  2718  	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
  2719  	// runtime_AfterFork will undo this in parent process, but not in child.
  2720  	gp.stackguard0 = stackFork
  2721  }
  2722  
  2723  // Called from syscall package before fork.
  2724  //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
  2725  //go:nosplit
  2726  func syscall_runtime_BeforeFork() {
  2727  	systemstack(beforefork)
  2728  }
  2729  
  2730  func afterfork() {
  2731  	gp := getg().m.curg
  2732  
  2733  	// See the comment in beforefork.
  2734  	gp.stackguard0 = gp.stack.lo + _StackGuard
  2735  
  2736  	hz := sched.profilehz
  2737  	if hz != 0 {
  2738  		resetcpuprofiler(hz)
  2739  	}
  2740  	gp.m.locks--
  2741  }
  2742  
  2743  // Called from syscall package after fork in parent.
  2744  //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
  2745  //go:nosplit
  2746  func syscall_runtime_AfterFork() {
  2747  	systemstack(afterfork)
  2748  }
  2749  
  2750  // Allocate a new g, with a stack big enough for stacksize bytes.
  2751  func malg(stacksize int32) *g {
  2752  	newg := new(g)
  2753  	if stacksize >= 0 {
  2754  		stacksize = round2(_StackSystem + stacksize)
  2755  		systemstack(func() {
  2756  			newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
  2757  		})
  2758  		newg.stackguard0 = newg.stack.lo + _StackGuard
  2759  		newg.stackguard1 = ^uintptr(0)
  2760  		newg.stackAlloc = uintptr(stacksize)
  2761  	}
  2762  	return newg
  2763  }
  2764  
  2765  // Create a new g running fn with siz bytes of arguments.
  2766  // Put it on the queue of g's waiting to run.
  2767  // The compiler turns a go statement into a call to this.
  2768  // Cannot split the stack because it assumes that the arguments
  2769  // are available sequentially after &fn; they would not be
  2770  // copied if a stack split occurred.
  2771  //go:nosplit
  2772  func newproc(siz int32, fn *funcval) {
  2773  	argp := add(unsafe.Pointer(&fn), sys.PtrSize)
  2774  	pc := getcallerpc(unsafe.Pointer(&siz))
  2775  	systemstack(func() {
  2776  		newproc1(fn, (*uint8)(argp), siz, 0, pc)
  2777  	})
  2778  }
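// Illustrative sketch: a statement such as
//
//	go add(1, 2)
//
// is lowered by the compiler into, roughly,
//
//	newproc(siz, fn) // siz = byte size of add's argument frame, fn = funcval for add
//
// with the arguments 1 and 2 laid out immediately after fn on the caller's stack,
// which is why newproc must be nosplit: a stack split before newproc1 copies the
// arguments would leave them behind.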
  2779  
  2780  // Create a new g running fn with narg bytes of arguments starting
  2781  // at argp and returning nret bytes of results.  callerpc is the
  2782  // address of the go statement that created this. The new g is put
  2783  // on the queue of g's waiting to run.
  2784  func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
  2785  	_g_ := getg()
  2786  
  2787  	if fn == nil {
  2788  		_g_.m.throwing = -1 // do not dump full stacks
  2789  		throw("go of nil func value")
  2790  	}
  2791  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
  2792  	siz := narg + nret
  2793  	siz = (siz + 7) &^ 7
  2794  
  2795  	// We could allocate a larger initial stack if necessary.
  2796  	// Not worth it: this is almost always an error.
  2797  	// 4*sizeof(uintreg): extra space added below
  2798  	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
  2799  	if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
  2800  		throw("newproc: function arguments too large for new goroutine")
  2801  	}
  2802  
  2803  	_p_ := _g_.m.p.ptr()
  2804  	newg := gfget(_p_)
  2805  	if newg == nil {
  2806  		newg = malg(_StackMin)
  2807  		casgstatus(newg, _Gidle, _Gdead)
  2808  		newg.gcRescan = -1
  2809  		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  2810  	}
  2811  	if newg.stack.hi == 0 {
  2812  		throw("newproc1: newg missing stack")
  2813  	}
  2814  
  2815  	if readgstatus(newg) != _Gdead {
  2816  		throw("newproc1: new g is not Gdead")
  2817  	}
  2818  
  2819  	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
  2820  	totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
  2821  	sp := newg.stack.hi - totalSize
  2822  	spArg := sp
  2823  	if usesLR {
  2824  		// caller's LR
  2825  		*(*uintptr)(unsafe.Pointer(sp)) = 0
  2826  		prepGoExitFrame(sp)
  2827  		spArg += sys.MinFrameSize
  2828  	}
  2829  	if narg > 0 {
  2830  		memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
  2831  		// This is a stack-to-stack copy. If write barriers
  2832  		// are enabled and the source stack is grey (the
  2833  		// destination is always black), then perform a
  2834  		// barrier copy. We do this *after* the memmove
  2835  		// because the destination stack may have garbage on
  2836  		// it.
  2837  		if writeBarrier.needed && !_g_.m.curg.gcscandone {
  2838  			f := findfunc(fn.fn)
  2839  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
  2840  			// We're in the prologue, so it's always stack map index 0.
  2841  			bv := stackmapdata(stkmap, 0)
  2842  			bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata)
  2843  		}
  2844  	}
  2845  
  2846  	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
  2847  	newg.sched.sp = sp
  2848  	newg.stktopsp = sp
  2849  	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
  2850  	newg.sched.g = guintptr(unsafe.Pointer(newg))
  2851  	gostartcallfn(&newg.sched, fn)
  2852  	newg.gopc = callerpc
  2853  	newg.startpc = fn.fn
  2854  	if isSystemGoroutine(newg) {
  2855  		atomic.Xadd(&sched.ngsys, +1)
  2856  	}
  2857  	// The stack is dirty from the argument frame, so queue it for
  2858  	// scanning. Do this before setting it to runnable so we still
  2859  	// own the G. If we're recycling a G, it may already be on the
  2860  	// rescan list.
  2861  	if newg.gcRescan == -1 {
  2862  		queueRescan(newg)
  2863  	} else {
  2864  		// The recycled G is already on the rescan list. Just
  2865  		// mark the stack dirty.
  2866  		newg.gcscanvalid = false
  2867  	}
  2868  	casgstatus(newg, _Gdead, _Grunnable)
  2869  
  2870  	if _p_.goidcache == _p_.goidcacheend {
  2871  		// Sched.goidgen is the last allocated id,
  2872  		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  2873  		// At startup sched.goidgen=0, so main goroutine receives goid=1.
  2874  		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
  2875  		_p_.goidcache -= _GoidCacheBatch - 1
  2876  		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
  2877  	}
  2878  	newg.goid = int64(_p_.goidcache)
  2879  	_p_.goidcache++
  2880  	if raceenabled {
  2881  		newg.racectx = racegostart(callerpc)
  2882  	}
  2883  	if trace.enabled {
  2884  		traceGoCreate(newg, newg.startpc)
  2885  	}
  2886  	runqput(_p_, newg, true)
  2887  
  2888  	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 {
  2889  		wakep()
  2890  	}
  2891  	_g_.m.locks--
  2892  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  2893  		_g_.stackguard0 = stackPreempt
  2894  	}
  2895  	return newg
  2896  }
  2897  
  2898  // Put on gfree list.
  2899  // If local list is too long, transfer a batch to the global list.
  2900  func gfput(_p_ *p, gp *g) {
  2901  	if readgstatus(gp) != _Gdead {
  2902  		throw("gfput: bad status (not Gdead)")
  2903  	}
  2904  
  2905  	stksize := gp.stackAlloc
  2906  
  2907  	if stksize != _FixedStack {
  2908  		// non-standard stack size - free it.
  2909  		stackfree(gp.stack, gp.stackAlloc)
  2910  		gp.stack.lo = 0
  2911  		gp.stack.hi = 0
  2912  		gp.stackguard0 = 0
  2913  		gp.stkbar = nil
  2914  		gp.stkbarPos = 0
  2915  	} else {
  2916  		// Reset stack barriers.
  2917  		gp.stkbar = gp.stkbar[:0]
  2918  		gp.stkbarPos = 0
  2919  	}
  2920  
  2921  	gp.schedlink.set(_p_.gfree)
  2922  	_p_.gfree = gp
  2923  	_p_.gfreecnt++
  2924  	if _p_.gfreecnt >= 64 {
  2925  		lock(&sched.gflock)
  2926  		for _p_.gfreecnt >= 32 {
  2927  			_p_.gfreecnt--
  2928  			gp = _p_.gfree
  2929  			_p_.gfree = gp.schedlink.ptr()
  2930  			if gp.stack.lo == 0 {
  2931  				gp.schedlink.set(sched.gfreeNoStack)
  2932  				sched.gfreeNoStack = gp
  2933  			} else {
  2934  				gp.schedlink.set(sched.gfreeStack)
  2935  				sched.gfreeStack = gp
  2936  			}
  2937  			sched.ngfree++
  2938  		}
  2939  		unlock(&sched.gflock)
  2940  	}
  2941  }
  2942  
  2943  // Get from gfree list.
  2944  // If local list is empty, grab a batch from global list.
  2945  func gfget(_p_ *p) *g {
  2946  retry:
  2947  	gp := _p_.gfree
  2948  	if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) {
  2949  		lock(&sched.gflock)
  2950  		for _p_.gfreecnt < 32 {
  2951  			if sched.gfreeStack != nil {
  2952  				// Prefer Gs with stacks.
  2953  				gp = sched.gfreeStack
  2954  				sched.gfreeStack = gp.schedlink.ptr()
  2955  			} else if sched.gfreeNoStack != nil {
  2956  				gp = sched.gfreeNoStack
  2957  				sched.gfreeNoStack = gp.schedlink.ptr()
  2958  			} else {
  2959  				break
  2960  			}
  2961  			_p_.gfreecnt++
  2962  			sched.ngfree--
  2963  			gp.schedlink.set(_p_.gfree)
  2964  			_p_.gfree = gp
  2965  		}
  2966  		unlock(&sched.gflock)
  2967  		goto retry
  2968  	}
  2969  	if gp != nil {
  2970  		_p_.gfree = gp.schedlink.ptr()
  2971  		_p_.gfreecnt--
  2972  		if gp.stack.lo == 0 {
  2973  			// Stack was deallocated in gfput. Allocate a new one.
  2974  			systemstack(func() {
  2975  				gp.stack, gp.stkbar = stackalloc(_FixedStack)
  2976  			})
  2977  			gp.stackguard0 = gp.stack.lo + _StackGuard
  2978  			gp.stackAlloc = _FixedStack
  2979  		} else {
  2980  			if raceenabled {
  2981  				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
  2982  			}
  2983  			if msanenabled {
  2984  				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
  2985  			}
  2986  		}
  2987  	}
  2988  	return gp
  2989  }
  2990  
  2991  // Purge all cached G's from gfree list to the global list.
  2992  func gfpurge(_p_ *p) {
  2993  	lock(&sched.gflock)
  2994  	for _p_.gfreecnt != 0 {
  2995  		_p_.gfreecnt--
  2996  		gp := _p_.gfree
  2997  		_p_.gfree = gp.schedlink.ptr()
  2998  		if gp.stack.lo == 0 {
  2999  			gp.schedlink.set(sched.gfreeNoStack)
  3000  			sched.gfreeNoStack = gp
  3001  		} else {
  3002  			gp.schedlink.set(sched.gfreeStack)
  3003  			sched.gfreeStack = gp
  3004  		}
  3005  		sched.ngfree++
  3006  	}
  3007  	unlock(&sched.gflock)
  3008  }
  3009  
  3010  // Breakpoint executes a breakpoint trap.
  3011  func Breakpoint() {
  3012  	breakpoint()
  3013  }
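// Illustrative usage sketch: a program can stop itself under an attached debugger
// (e.g. gdb or delve) by calling Breakpoint directly; without a debugger the trap
// typically terminates the process with SIGTRAP. The debugging flag is hypothetical:
//
//	if debugging {
//		runtime.Breakpoint() // execution halts here when a debugger is attached
//	}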
  3014  
  3015  // dolockOSThread is called by LockOSThread and lockOSThread below
  3016  // after they modify m.locked. Do not allow preemption during this call,
  3017  // or else the m might be different in this function than in the caller.
  3018  //go:nosplit
  3019  func dolockOSThread() {
  3020  	_g_ := getg()
  3021  	_g_.m.lockedg = _g_
  3022  	_g_.lockedm = _g_.m
  3023  }
  3024  
  3025  //go:nosplit
  3026  
  3027  // LockOSThread wires the calling goroutine to its current operating system thread.
  3028  // Until the calling goroutine exits or calls UnlockOSThread, it will always
  3029  // execute in that thread, and no other goroutine can.
  3030  func LockOSThread() {
  3031  	getg().m.locked |= _LockExternal
  3032  	dolockOSThread()
  3033  }
  3034  
  3035  //go:nosplit
  3036  func lockOSThread() {
  3037  	getg().m.locked += _LockInternal
  3038  	dolockOSThread()
  3039  }
  3040  
  3041  // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
  3042  // after they update m->locked. Do not allow preemption during this call,
  3043  // or else the m might be different in this function than in the caller.
  3044  //go:nosplit
  3045  func dounlockOSThread() {
  3046  	_g_ := getg()
  3047  	if _g_.m.locked != 0 {
  3048  		return
  3049  	}
  3050  	_g_.m.lockedg = nil
  3051  	_g_.lockedm = nil
  3052  }
  3053  
  3054  //go:nosplit
  3055  
  3056  // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
  3057  // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
  3058  func UnlockOSThread() {
  3059  	getg().m.locked &^= _LockExternal
  3060  	dounlockOSThread()
  3061  }
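// Illustrative usage sketch: the exported pair is typically wrapped around work that
// must stay on a single kernel thread, such as a C library with per-thread state.
// initThreadLocalLib and useThreadLocalLib are hypothetical:
//
//	func worker() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		initThreadLocalLib() // hypothetical per-thread setup
//		useThreadLocalLib()  // guaranteed to run on the same OS thread
//	}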
  3062  
  3063  //go:nosplit
  3064  func unlockOSThread() {
  3065  	_g_ := getg()
  3066  	if _g_.m.locked < _LockInternal {
  3067  		systemstack(badunlockosthread)
  3068  	}
  3069  	_g_.m.locked -= _LockInternal
  3070  	dounlockOSThread()
  3071  }
  3072  
  3073  func badunlockosthread() {
  3074  	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
  3075  }
  3076  
  3077  func gcount() int32 {
  3078  	n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
  3079  	for i := 0; ; i++ {
  3080  		_p_ := allp[i]
  3081  		if _p_ == nil {
  3082  			break
  3083  		}
  3084  		n -= _p_.gfreecnt
  3085  	}
  3086  
  3087  	// All these variables can be changed concurrently, so the result can be inconsistent.
  3088  	// But at least the current goroutine is running.
  3089  	if n < 1 {
  3090  		n = 1
  3091  	}
  3092  	return n
  3093  }
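// Illustrative note: gcount is the value reported to users by the exported
// runtime.NumGoroutine, which is defined elsewhere in the runtime as, approximately,
//
//	func NumGoroutine() int {
//		return int(gcount())
//	}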
  3094  
  3095  func mcount() int32 {
  3096  	return sched.mcount
  3097  }
  3098  
  3099  var prof struct {
  3100  	lock uint32
  3101  	hz   int32
  3102  }
  3103  
  3104  func _System()       { _System() }
  3105  func _ExternalCode() { _ExternalCode() }
  3106  func _GC()           { _GC() }
  3107  
  3108  // Called if we receive a SIGPROF signal.
  3109  // Called by the signal handler, may run during STW.
  3110  //go:nowritebarrierrec
  3111  func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
  3112  	if prof.hz == 0 {
  3113  		return
  3114  	}
  3115  
  3116  	// Profiling runs concurrently with GC, so it must not allocate.
  3117  	mp.mallocing++
  3118  
  3119  	// Define that a "user g" is a user-created goroutine, and a "system g"
  3120  	// is one that is m->g0 or m->gsignal.
  3121  	//
  3122  	// We might be interrupted for profiling halfway through a
  3123  	// goroutine switch. The switch involves updating three (or four) values:
  3124  	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
  3125  	// because once it gets updated the new g is running.
  3126  	//
  3127  	// When switching from a user g to a system g, LR is not considered live,
  3128  	// so the update only affects g, SP, and PC. Since PC must be last,
  3129  	// the possible partial transitions in ordinary execution are (1) g alone is updated,
  3130  	// (2) both g and SP are updated, and (3) SP alone is updated.
  3131  	// If SP or g alone is updated, we can detect the partial transition by checking
  3132  	// whether the SP is within g's stack bounds. (We could also require that SP
  3133  	// be changed only after g, but the stack bounds check is needed by other
  3134  	// cases, so there is no need to impose an additional requirement.)
  3135  	//
  3136  	// There is one exceptional transition to a system g, not in ordinary execution.
  3137  	// When a signal arrives, the operating system starts the signal handler running
  3138  	// with an updated PC and SP. The g is updated last, at the beginning of the
  3139  	// handler. There are two reasons this is okay. First, until g is updated the
  3140  	// g and SP do not match, so the stack bounds check detects the partial transition.
  3141  	// Second, signal handlers currently run with signals disabled, so a profiling
  3142  	// signal cannot arrive during the handler.
  3143  	//
  3144  	// When switching from a system g to a user g, there are three possibilities.
  3145  	//
  3146  	// First, it may be that the g switch has no PC update, because the SP
  3147  	// either corresponds to a user g throughout (as in asmcgocall)
  3148  	// or has been arranged to look like a user g frame
  3149  	// (as in cgocallback_gofunc). In this case, since the entire
  3150  	// transition is a g+SP update, a partial transition updating just one of
  3151  	// those will be detected by the stack bounds check.
  3152  	//
  3153  	// Second, when returning from a signal handler, the PC and SP updates
  3154  	// are performed by the operating system in an atomic update, so the g
  3155  	// update must be done before them. The stack bounds check detects
  3156  	// the partial transition here, and (again) signal handlers run with signals
  3157  	// disabled, so a profiling signal cannot arrive then anyway.
  3158  	//
  3159  	// Third, the common case: it may be that the switch updates g, SP, and PC
  3160  	// separately. If the PC is within any of the functions that do this,
  3161  	// we don't ask for a traceback. See the function setsSP for more about this.
  3162  	//
  3163  	// There is another apparently viable approach, recorded here in case
  3164  	// the "PC within setsSP function" check turns out not to be usable.
  3165  	// It would be possible to delay the update of either g or SP until immediately
  3166  	// before the PC update instruction. Then, because of the stack bounds check,
  3167  	// the only problematic interrupt point is just before that PC update instruction,
  3168  	// and the sigprof handler can detect that instruction and simulate stepping past
  3169  	// it in order to reach a consistent state. On ARM, the update of g must be made
  3170  	// in two places (in R10 and also in a TLS slot), so the delayed update would
  3171  	// need to be the SP update. The sigprof handler must read the instruction at
  3172  	// the current PC and if it was the known instruction (for example, JMP BX or
  3173  	// MOV R2, PC), use that other register in place of the PC value.
  3174  	// The biggest drawback to this solution is that it requires that we can tell
  3175  	// whether it's safe to read from the memory pointed at by PC.
  3176  	// In a correct program, we can test PC == nil and otherwise read,
  3177  	// but if a profiling signal happens at the instant that a program executes
  3178  	// a bad jump (before the program manages to handle the resulting fault)
  3179  	// the profiling handler could fault trying to read nonexistent memory.
  3180  	//
  3181  	// To recap, there are no constraints on the assembly being used for the
  3182  	// transition. We simply require that g and SP match and that the PC is not
  3183  	// in gogo.
  3184  	traceback := true
  3185  	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
  3186  		traceback = false
  3187  	}
  3188  	var stk [maxCPUProfStack]uintptr
  3189  	var haveStackLock *g
  3190  	n := 0
  3191  	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
  3192  		cgoOff := 0
  3193  		// Check cgoCallersUse to make sure that we are not
  3194  		// interrupting other code that is fiddling with
  3195  		// cgoCallers.  We are running in a signal handler
  3196  		// with all signals blocked, so we don't have to worry
  3197  		// about any other code interrupting us.
  3198  		if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
  3199  			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
  3200  				cgoOff++
  3201  			}
  3202  			copy(stk[:], mp.cgoCallers[:cgoOff])
  3203  			mp.cgoCallers[0] = 0
  3204  		}
  3205  
  3206  		// Collect Go stack that leads to the cgo call.
  3207  		if gcTryLockStackBarriers(mp.curg) {
  3208  			haveStackLock = mp.curg
  3209  			n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
  3210  		}
  3211  	} else if traceback {
  3212  		var flags uint = _TraceTrap
  3213  		if gp.m.curg != nil && gcTryLockStackBarriers(gp.m.curg) {
  3214  			// It's safe to traceback the user stack.
  3215  			haveStackLock = gp.m.curg
  3216  			flags |= _TraceJumpStack
  3217  		}
  3218  		// Traceback is safe if we're on the system stack (if
  3219  		// necessary, flags will stop it before switching to
  3220  		// the user stack), or if we locked the user stack.
  3221  		if gp != gp.m.curg || haveStackLock != nil {
  3222  			n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags)
  3223  		}
  3224  	}
  3225  	if haveStackLock != nil {
  3226  		gcUnlockStackBarriers(haveStackLock)
  3227  	}
  3228  
  3229  	if n <= 0 {
  3230  		// Normal traceback is impossible or has failed.
  3231  		// See if it falls into several common cases.
  3232  		n = 0
  3233  		if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
  3234  			// Libcall, i.e. runtime syscall on windows.
  3235  			// Collect Go stack that leads to the call.
  3236  			if gcTryLockStackBarriers(mp.libcallg.ptr()) {
  3237  				n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
  3238  				gcUnlockStackBarriers(mp.libcallg.ptr())
  3239  			}
  3240  		}
  3241  		if n == 0 {
  3242  			// If all of the above has failed, account it against abstract "System" or "GC".
  3243  			n = 2
  3244  			// "ExternalCode" is better than "etext".
  3245  			if pc > firstmoduledata.etext {
  3246  				pc = funcPC(_ExternalCode) + sys.PCQuantum
  3247  			}
  3248  			stk[0] = pc
  3249  			if mp.preemptoff != "" || mp.helpgc != 0 {
  3250  				stk[1] = funcPC(_GC) + sys.PCQuantum
  3251  			} else {
  3252  				stk[1] = funcPC(_System) + sys.PCQuantum
  3253  			}
  3254  		}
  3255  	}
  3256  
  3257  	if prof.hz != 0 {
  3258  		// Simple cas-lock to coordinate with setcpuprofilerate.
  3259  		for !atomic.Cas(&prof.lock, 0, 1) {
  3260  			osyield()
  3261  		}
  3262  		if prof.hz != 0 {
  3263  			cpuprof.add(stk[:n])
  3264  		}
  3265  		atomic.Store(&prof.lock, 0)
  3266  	}
  3267  	mp.mallocing--
  3268  }
  3269  
  3270  // If the signal handler receives a SIGPROF signal on a non-Go thread,
  3271  // it tries to collect a traceback into sigprofCallers.
  3272  // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
  3273  var sigprofCallers cgoCallers
  3274  var sigprofCallersUse uint32
  3275  
  3276  // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
  3277  // and the signal handler collected a stack trace in sigprofCallers.
  3278  // When this is called, sigprofCallersUse will be non-zero.
  3279  // g is nil, and what we can do is very limited.
  3280  //go:nosplit
  3281  //go:nowritebarrierrec
  3282  func sigprofNonGo() {
  3283  	if prof.hz != 0 {
  3284  		n := 0
  3285  		for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
  3286  			n++
  3287  		}
  3288  
  3289  		// Simple cas-lock to coordinate with setcpuprofilerate.
  3290  		for !atomic.Cas(&prof.lock, 0, 1) {
  3291  			osyield()
  3292  		}
  3293  		if prof.hz != 0 {
  3294  			cpuprof.addNonGo(sigprofCallers[:n])
  3295  		}
  3296  		atomic.Store(&prof.lock, 0)
  3297  	}
  3298  
  3299  	atomic.Store(&sigprofCallersUse, 0)
  3300  }
  3301  
  3302  // sigprofNonGoPC is called when a profiling signal arrived on a
  3303  // non-Go thread and we have a single PC value, not a stack trace.
  3304  // g is nil, and what we can do is very limited.
  3305  //go:nosplit
  3306  //go:nowritebarrierrec
  3307  func sigprofNonGoPC(pc uintptr) {
  3308  	if prof.hz != 0 {
  3309  		pc := []uintptr{
  3310  			pc,
  3311  			funcPC(_ExternalCode) + sys.PCQuantum,
  3312  		}
  3313  
  3314  		// Simple cas-lock to coordinate with setcpuprofilerate.
  3315  		for !atomic.Cas(&prof.lock, 0, 1) {
  3316  			osyield()
  3317  		}
  3318  		if prof.hz != 0 {
  3319  			cpuprof.addNonGo(pc)
  3320  		}
  3321  		atomic.Store(&prof.lock, 0)
  3322  	}
  3323  }
  3324  
  3325  // setsSP reports whether a function will set the SP
  3326  // to an absolute value. It is important that
  3327  // we don't traceback when these are at the bottom
  3328  // of the stack since we can't be sure that we will
  3329  // find the caller.
  3330  //
  3331  // If the function is not on the bottom of the stack
  3332  // we assume that it will have set it up so that traceback will be consistent,
  3333  // either by being a traceback terminating function
  3334  // or putting one on the stack at the right offset.
  3335  func setsSP(pc uintptr) bool {
  3336  	f := findfunc(pc)
  3337  	if f == nil {
  3338  		// couldn't find the function for this PC,
  3339  		// so assume the worst and stop traceback
  3340  		return true
  3341  	}
  3342  	switch f.entry {
  3343  	case gogoPC, systemstackPC, mcallPC, morestackPC:
  3344  		return true
  3345  	}
  3346  	return false
  3347  }
  3348  
  3349  // Arrange to call fn with a traceback hz times a second.
  3350  func setcpuprofilerate_m(hz int32) {
  3351  	// Force sane arguments.
  3352  	if hz < 0 {
  3353  		hz = 0
  3354  	}
  3355  
  3356  	// Disable preemption, otherwise we can be rescheduled to another thread
  3357  	// that has profiling enabled.
  3358  	_g_ := getg()
  3359  	_g_.m.locks++
  3360  
  3361  	// Stop profiler on this thread so that it is safe to lock prof.
  3362  	// If a profiling signal came in while we had prof locked,
  3363  	// it would deadlock.
  3364  	resetcpuprofiler(0)
  3365  
  3366  	for !atomic.Cas(&prof.lock, 0, 1) {
  3367  		osyield()
  3368  	}
  3369  	prof.hz = hz
  3370  	atomic.Store(&prof.lock, 0)
  3371  
  3372  	lock(&sched.lock)
  3373  	sched.profilehz = hz
  3374  	unlock(&sched.lock)
  3375  
  3376  	if hz != 0 {
  3377  		resetcpuprofiler(hz)
  3378  	}
  3379  
  3380  	_g_.m.locks--
  3381  }
  3382  
  3383  // Change number of processors. The world is stopped, sched is locked.
  3384  // gcworkbufs are not being modified by either the GC or
  3385  // the write barrier code.
  3386  // Returns list of Ps with local work, they need to be scheduled by the caller.
  3387  func procresize(nprocs int32) *p {
  3388  	old := gomaxprocs
  3389  	if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
  3390  		throw("procresize: invalid arg")
  3391  	}
  3392  	if trace.enabled {
  3393  		traceGomaxprocs(nprocs)
  3394  	}
  3395  
  3396  	// update statistics
  3397  	now := nanotime()
  3398  	if sched.procresizetime != 0 {
  3399  		sched.totaltime += int64(old) * (now - sched.procresizetime)
  3400  	}
  3401  	sched.procresizetime = now
  3402  
  3403  	// initialize new P's
  3404  	for i := int32(0); i < nprocs; i++ {
  3405  		pp := allp[i]
  3406  		if pp == nil {
  3407  			pp = new(p)
  3408  			pp.id = i
  3409  			pp.status = _Pgcstop
  3410  			pp.sudogcache = pp.sudogbuf[:0]
  3411  			for i := range pp.deferpool {
  3412  				pp.deferpool[i] = pp.deferpoolbuf[i][:0]
  3413  			}
  3414  			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
  3415  		}
  3416  		if pp.mcache == nil {
  3417  			if old == 0 && i == 0 {
  3418  				if getg().m.mcache == nil {
  3419  					throw("missing mcache?")
  3420  				}
  3421  				pp.mcache = getg().m.mcache // bootstrap
  3422  			} else {
  3423  				pp.mcache = allocmcache()
  3424  			}
  3425  		}
  3426  		if raceenabled && pp.racectx == 0 {
  3427  			if old == 0 && i == 0 {
  3428  				pp.racectx = raceprocctx0
  3429  				raceprocctx0 = 0 // bootstrap
  3430  			} else {
  3431  				pp.racectx = raceproccreate()
  3432  			}
  3433  		}
  3434  	}
  3435  
  3436  	// free unused P's
  3437  	for i := nprocs; i < old; i++ {
  3438  		p := allp[i]
  3439  		if trace.enabled {
  3440  			if p == getg().m.p.ptr() {
  3441  				// moving to p[0], pretend that we were descheduled
  3442  				// and then scheduled again to keep the trace sane.
  3443  				traceGoSched()
  3444  				traceProcStop(p)
  3445  			}
  3446  		}
  3447  		// move all runnable goroutines to the global queue
  3448  		for p.runqhead != p.runqtail {
  3449  			// pop from tail of local queue
  3450  			p.runqtail--
  3451  			gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
  3452  			// push onto head of global queue
  3453  			globrunqputhead(gp)
  3454  		}
  3455  		if p.runnext != 0 {
  3456  			globrunqputhead(p.runnext.ptr())
  3457  			p.runnext = 0
  3458  		}
  3459  		// if there's a background worker, make it runnable and put
  3460  		// it on the global queue so it can clean itself up
  3461  		if gp := p.gcBgMarkWorker.ptr(); gp != nil {
  3462  			casgstatus(gp, _Gwaiting, _Grunnable)
  3463  			if trace.enabled {
  3464  				traceGoUnpark(gp, 0)
  3465  			}
  3466  			globrunqput(gp)
  3467  			// This assignment doesn't race because the
  3468  			// world is stopped.
  3469  			p.gcBgMarkWorker.set(nil)
  3470  		}
  3471  		for i := range p.sudogbuf {
  3472  			p.sudogbuf[i] = nil
  3473  		}
  3474  		p.sudogcache = p.sudogbuf[:0]
  3475  		for i := range p.deferpool {
  3476  			for j := range p.deferpoolbuf[i] {
  3477  				p.deferpoolbuf[i][j] = nil
  3478  			}
  3479  			p.deferpool[i] = p.deferpoolbuf[i][:0]
  3480  		}
  3481  		freemcache(p.mcache)
  3482  		p.mcache = nil
  3483  		gfpurge(p)
  3484  		traceProcFree(p)
  3485  		if raceenabled {
  3486  			raceprocdestroy(p.racectx)
  3487  			p.racectx = 0
  3488  		}
  3489  		p.status = _Pdead
  3490  		// can't free P itself because it can be referenced by an M in syscall
  3491  	}
  3492  
  3493  	_g_ := getg()
  3494  	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
  3495  		// continue to use the current P
  3496  		_g_.m.p.ptr().status = _Prunning
  3497  	} else {
  3498  		// release the current P and acquire allp[0]
  3499  		if _g_.m.p != 0 {
  3500  			_g_.m.p.ptr().m = 0
  3501  		}
  3502  		_g_.m.p = 0
  3503  		_g_.m.mcache = nil
  3504  		p := allp[0]
  3505  		p.m = 0
  3506  		p.status = _Pidle
  3507  		acquirep(p)
  3508  		if trace.enabled {
  3509  			traceGoStart()
  3510  		}
  3511  	}
  3512  	var runnablePs *p
  3513  	for i := nprocs - 1; i >= 0; i-- {
  3514  		p := allp[i]
  3515  		if _g_.m.p.ptr() == p {
  3516  			continue
  3517  		}
  3518  		p.status = _Pidle
  3519  		if runqempty(p) {
  3520  			pidleput(p)
  3521  		} else {
  3522  			p.m.set(mget())
  3523  			p.link.set(runnablePs)
  3524  			runnablePs = p
  3525  		}
  3526  	}
  3527  	stealOrder.reset(uint32(nprocs))
  3528  	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
  3529  	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
  3530  	return runnablePs
  3531  }
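
        // Editor's addition, illustrative only: procresize is not called directly
        // by user code. A GOMAXPROCS change reaches it with the world stopped,
        // roughly:
        //
        //	prev := runtime.GOMAXPROCS(4) // stop the world, procresize(4), restart
        //	_ = prev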
  3532  
  3533  // Associate p and the current m.
  3534  //
  3535  // This function is allowed to have write barriers even if the caller
  3536  // isn't because it immediately acquires _p_.
  3537  //
  3538  //go:yeswritebarrierrec
  3539  func acquirep(_p_ *p) {
  3540  	// Do the part that isn't allowed to have write barriers.
  3541  	acquirep1(_p_)
  3542  
  3543  	// have p; write barriers now allowed
  3544  	_g_ := getg()
  3545  	_g_.m.mcache = _p_.mcache
  3546  
  3547  	if trace.enabled {
  3548  		traceProcStart()
  3549  	}
  3550  }
  3551  
  3552  // acquirep1 is the first step of acquirep, which actually acquires
  3553  // _p_. This is broken out so we can disallow write barriers for this
  3554  // part, since we don't yet have a P.
  3555  //
  3556  //go:nowritebarrierrec
  3557  func acquirep1(_p_ *p) {
  3558  	_g_ := getg()
  3559  
  3560  	if _g_.m.p != 0 || _g_.m.mcache != nil {
  3561  		throw("acquirep: already in go")
  3562  	}
  3563  	if _p_.m != 0 || _p_.status != _Pidle {
  3564  		id := int32(0)
  3565  		if _p_.m != 0 {
  3566  			id = _p_.m.ptr().id
  3567  		}
  3568  		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
  3569  		throw("acquirep: invalid p state")
  3570  	}
  3571  	_g_.m.p.set(_p_)
  3572  	_p_.m.set(_g_.m)
  3573  	_p_.status = _Prunning
  3574  }
  3575  
  3576  // Disassociate p and the current m.
  3577  func releasep() *p {
  3578  	_g_ := getg()
  3579  
  3580  	if _g_.m.p == 0 || _g_.m.mcache == nil {
  3581  		throw("releasep: invalid arg")
  3582  	}
  3583  	_p_ := _g_.m.p.ptr()
  3584  	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
  3585  		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
  3586  		throw("releasep: invalid p state")
  3587  	}
  3588  	if trace.enabled {
  3589  		traceProcStop(_g_.m.p.ptr())
  3590  	}
  3591  	_g_.m.p = 0
  3592  	_g_.m.mcache = nil
  3593  	_p_.m = 0
  3594  	_p_.status = _Pidle
  3595  	return _p_
  3596  }
  3597  
  3598  func incidlelocked(v int32) {
  3599  	lock(&sched.lock)
  3600  	sched.nmidlelocked += v
  3601  	if v > 0 {
  3602  		checkdead()
  3603  	}
  3604  	unlock(&sched.lock)
  3605  }
  3606  
  3607  // Check for deadlock situation.
  3608  // The check is based on the number of running M's; if it is 0, we have a deadlock.
  3609  func checkdead() {
  3610  	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
  3611  	// there are no running goroutines. The calling program is
  3612  	// assumed to be running.
  3613  	if islibrary || isarchive {
  3614  		return
  3615  	}
  3616  
  3617  	// If we are dying because of a signal caught on an already idle thread,
  3618  	// freezetheworld will cause all running threads to block.
  3619  	// And runtime will essentially enter into deadlock state,
  3620  	// except that there is a thread that will call exit soon.
  3621  	if panicking > 0 {
  3622  		return
  3623  	}
  3624  
  3625  	// -1 for sysmon
  3626  	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
  3627  	if run > 0 {
  3628  		return
  3629  	}
  3630  	if run < 0 {
  3631  		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
  3632  		throw("checkdead: inconsistent counts")
  3633  	}
  3634  
  3635  	grunning := 0
  3636  	lock(&allglock)
  3637  	for i := 0; i < len(allgs); i++ {
  3638  		gp := allgs[i]
  3639  		if isSystemGoroutine(gp) {
  3640  			continue
  3641  		}
  3642  		s := readgstatus(gp)
  3643  		switch s &^ _Gscan {
  3644  		case _Gwaiting:
  3645  			grunning++
  3646  		case _Grunnable,
  3647  			_Grunning,
  3648  			_Gsyscall:
  3649  			unlock(&allglock)
  3650  			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
  3651  			throw("checkdead: runnable g")
  3652  		}
  3653  	}
  3654  	unlock(&allglock)
  3655  	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
  3656  		throw("no goroutines (main called runtime.Goexit) - deadlock!")
  3657  	}
  3658  
  3659  	// Maybe jump time forward for playground.
  3660  	gp := timejump()
  3661  	if gp != nil {
  3662  		casgstatus(gp, _Gwaiting, _Grunnable)
  3663  		globrunqput(gp)
  3664  		_p_ := pidleget()
  3665  		if _p_ == nil {
  3666  			throw("checkdead: no p for timer")
  3667  		}
  3668  		mp := mget()
  3669  		if mp == nil {
  3670  			// There should always be a free M since
  3671  			// nothing is running.
  3672  			throw("checkdead: no m for timer")
  3673  		}
  3674  		mp.nextp.set(_p_)
  3675  		notewakeup(&mp.park)
  3676  		return
  3677  	}
  3678  
  3679  	getg().m.throwing = -1 // do not dump full stacks
  3680  	throw("all goroutines are asleep - deadlock!")
  3681  }
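
        // Editor's addition, illustrative only: the classic way user code reaches
        // the "all goroutines are asleep" throw above:
        //
        //	func main() {
        //		ch := make(chan int)
        //		<-ch // nothing can ever send; checkdead fires
        //	}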
  3682  
  3683  // forcegcperiod is the maximum time in nanoseconds between garbage
  3684  // collections. If we go this long without a garbage collection, one
  3685  // is forced to run.
  3686  //
  3687  // This is a variable for testing purposes. It normally doesn't change.
  3688  var forcegcperiod int64 = 2 * 60 * 1e9
  3689  
  3690  // Always runs without a P, so write barriers are not allowed.
  3691  //
  3692  //go:nowritebarrierrec
  3693  func sysmon() {
  3694  	// If a heap span goes unused for 5 minutes after a garbage collection,
  3695  	// we hand it back to the operating system.
  3696  	scavengelimit := int64(5 * 60 * 1e9)
  3697  
  3698  	if debug.scavenge > 0 {
  3699  		// Scavenge-a-lot for testing.
  3700  		forcegcperiod = 10 * 1e6
  3701  		scavengelimit = 20 * 1e6
  3702  	}
  3703  
  3704  	lastscavenge := nanotime()
  3705  	nscavenge := 0
  3706  
  3707  	lasttrace := int64(0)
  3708  	idle := 0 // how many cycles in succession we have not woken anybody up
  3709  	delay := uint32(0)
  3710  	for {
  3711  		if idle == 0 { // start with 20us sleep...
  3712  			delay = 20
  3713  		} else if idle > 50 { // start doubling the sleep after 1ms...
  3714  			delay *= 2
  3715  		}
  3716  		if delay > 10*1000 { // up to 10ms
  3717  			delay = 10 * 1000
  3718  		}
  3719  		usleep(delay)
  3720  		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
  3721  			lock(&sched.lock)
  3722  			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
  3723  				atomic.Store(&sched.sysmonwait, 1)
  3724  				unlock(&sched.lock)
  3725  				// Make wake-up period small enough
  3726  				// for the sampling to be correct.
  3727  				maxsleep := forcegcperiod / 2
  3728  				if scavengelimit < forcegcperiod {
  3729  					maxsleep = scavengelimit / 2
  3730  				}
  3731  				notetsleep(&sched.sysmonnote, maxsleep)
  3732  				lock(&sched.lock)
  3733  				atomic.Store(&sched.sysmonwait, 0)
  3734  				noteclear(&sched.sysmonnote)
  3735  				idle = 0
  3736  				delay = 20
  3737  			}
  3738  			unlock(&sched.lock)
  3739  		}
  3740  		// poll network if not polled for more than 10ms
  3741  		lastpoll := int64(atomic.Load64(&sched.lastpoll))
  3742  		now := nanotime()
  3743  		unixnow := unixnanotime()
  3744  		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
  3745  			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
  3746  			gp := netpoll(false) // non-blocking - returns list of goroutines
  3747  			if gp != nil {
  3748  				// Need to decrement number of idle locked M's
  3749  				// (pretending that one more is running) before injectglist.
  3750  				// Otherwise it can lead to the following situation:
  3751  				// injectglist grabs all P's but before it starts M's to run the P's,
  3752  				// another M returns from syscall, finishes running its G,
  3753  				// observes that there is no work to do and no other running M's
  3754  				// and reports deadlock.
  3755  				incidlelocked(-1)
  3756  				injectglist(gp)
  3757  				incidlelocked(1)
  3758  			}
  3759  		}
  3760  		// retake P's blocked in syscalls
  3761  		// and preempt long running G's
  3762  		if retake(now) != 0 {
  3763  			idle = 0
  3764  		} else {
  3765  			idle++
  3766  		}
  3767  		// check if we need to force a GC
  3768  		lastgc := int64(atomic.Load64(&memstats.last_gc))
  3769  		if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
  3770  			lock(&forcegc.lock)
  3771  			forcegc.idle = 0
  3772  			forcegc.g.schedlink = 0
  3773  			injectglist(forcegc.g)
  3774  			unlock(&forcegc.lock)
  3775  		}
  3776  		// scavenge heap once in a while
  3777  		if lastscavenge+scavengelimit/2 < now {
  3778  			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
  3779  			lastscavenge = now
  3780  			nscavenge++
  3781  		}
  3782  		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
  3783  			lasttrace = now
  3784  			schedtrace(debug.scheddetail > 0)
  3785  		}
  3786  	}
  3787  }
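
        // Editor's note, illustrative: with the constants above, sysmon sleeps 20us
        // per cycle while it keeps finding work; after 50 consecutive idle cycles
        // the sleep doubles each cycle (40us, 80us, ...) and is capped at 10ms, so
        // an idle sysmon wakes at most about 100 times per second.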
  3788  
  3789  var pdesc [_MaxGomaxprocs]struct {
  3790  	schedtick   uint32
  3791  	schedwhen   int64
  3792  	syscalltick uint32
  3793  	syscallwhen int64
  3794  }
  3795  
  3796  // forcePreemptNS is the time slice given to a G before it is
  3797  // preempted.
  3798  const forcePreemptNS = 10 * 1000 * 1000 // 10ms
  3799  
  3800  func retake(now int64) uint32 {
  3801  	n := 0
  3802  	for i := int32(0); i < gomaxprocs; i++ {
  3803  		_p_ := allp[i]
  3804  		if _p_ == nil {
  3805  			continue
  3806  		}
  3807  		pd := &pdesc[i]
  3808  		s := _p_.status
  3809  		if s == _Psyscall {
  3810  			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  3811  			t := int64(_p_.syscalltick)
  3812  			if int64(pd.syscalltick) != t {
  3813  				pd.syscalltick = uint32(t)
  3814  				pd.syscallwhen = now
  3815  				continue
  3816  			}
  3817  			// On the one hand we don't want to retake Ps if there is no other work to do,
  3818  			// but on the other hand we want to retake them eventually
  3819  			// because they can prevent the sysmon thread from deep sleep.
  3820  			if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
  3821  				continue
  3822  			}
  3823  			// Need to decrement number of idle locked M's
  3824  			// (pretending that one more is running) before the CAS.
  3825  			// Otherwise the M from which we retake can exit the syscall,
  3826  			// increment nmidle and report deadlock.
  3827  			incidlelocked(-1)
  3828  			if atomic.Cas(&_p_.status, s, _Pidle) {
  3829  				if trace.enabled {
  3830  					traceGoSysBlock(_p_)
  3831  					traceProcStop(_p_)
  3832  				}
  3833  				n++
  3834  				_p_.syscalltick++
  3835  				handoffp(_p_)
  3836  			}
  3837  			incidlelocked(1)
  3838  		} else if s == _Prunning {
  3839  			// Preempt G if it's running for too long.
  3840  			t := int64(_p_.schedtick)
  3841  			if int64(pd.schedtick) != t {
  3842  				pd.schedtick = uint32(t)
  3843  				pd.schedwhen = now
  3844  				continue
  3845  			}
  3846  			if pd.schedwhen+forcePreemptNS > now {
  3847  				continue
  3848  			}
  3849  			preemptone(_p_)
  3850  		}
  3851  	}
  3852  	return uint32(n)
  3853  }
  3854  
  3855  // Tell all goroutines that they have been preempted and they should stop.
  3856  // This function is purely best-effort. It can fail to inform a goroutine if a
  3857  // processor just started running it.
  3858  // No locks need to be held.
  3859  // Returns true if preemption request was issued to at least one goroutine.
  3860  func preemptall() bool {
  3861  	res := false
  3862  	for i := int32(0); i < gomaxprocs; i++ {
  3863  		_p_ := allp[i]
  3864  		if _p_ == nil || _p_.status != _Prunning {
  3865  			continue
  3866  		}
  3867  		if preemptone(_p_) {
  3868  			res = true
  3869  		}
  3870  	}
  3871  	return res
  3872  }
  3873  
  3874  // Tell the goroutine running on processor P to stop.
  3875  // This function is purely best-effort. It can incorrectly fail to inform the
  3876  // goroutine. It can inform the wrong goroutine. Even if it informs the
  3877  // correct goroutine, that goroutine might ignore the request if it is
  3878  // simultaneously executing newstack.
  3879  // No lock needs to be held.
  3880  // Returns true if preemption request was issued.
  3881  // The actual preemption will happen at some point in the future
  3882  // and will be indicated by gp->status no longer being
  3883  // Grunning.
  3884  func preemptone(_p_ *p) bool {
  3885  	mp := _p_.m.ptr()
  3886  	if mp == nil || mp == getg().m {
  3887  		return false
  3888  	}
  3889  	gp := mp.curg
  3890  	if gp == nil || gp == mp.g0 {
  3891  		return false
  3892  	}
  3893  
  3894  	gp.preempt = true
  3895  
  3896  	// Every call in a goroutine checks for stack overflow by
  3897  	// comparing the current stack pointer to gp->stackguard0.
  3898  	// Setting gp->stackguard0 to StackPreempt folds
  3899  	// preemption into the normal stack overflow check.
  3900  	gp.stackguard0 = stackPreempt
  3901  	return true
  3902  }
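
        // Editor's addition, illustrative sketch: the request above is delivered by
        // the stack-growth check that the compiler inserts in function prologues.
        // Conceptually each prologue does:
        //
        //	if SP < g.stackguard0 { // stackPreempt is a huge value, so this fires
        //		morestack() // which notices gp.preempt and yields instead of growing
        //	}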
  3903  
  3904  var starttime int64
  3905  
  3906  func schedtrace(detailed bool) {
  3907  	now := nanotime()
  3908  	if starttime == 0 {
  3909  		starttime = now
  3910  	}
  3911  
  3912  	lock(&sched.lock)
  3913  	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
  3914  	if detailed {
  3915  		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
  3916  	}
  3917  	// We must be careful while reading data from P's, M's and G's.
  3918  	// Even if we hold schedlock, most data can be changed concurrently.
  3919  	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  3920  	for i := int32(0); i < gomaxprocs; i++ {
  3921  		_p_ := allp[i]
  3922  		if _p_ == nil {
  3923  			continue
  3924  		}
  3925  		mp := _p_.m.ptr()
  3926  		h := atomic.Load(&_p_.runqhead)
  3927  		t := atomic.Load(&_p_.runqtail)
  3928  		if detailed {
  3929  			id := int32(-1)
  3930  			if mp != nil {
  3931  				id = mp.id
  3932  			}
  3933  			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
  3934  		} else {
  3935  			// In non-detailed mode format lengths of per-P run queues as:
  3936  			// [len1 len2 len3 len4]
  3937  			print(" ")
  3938  			if i == 0 {
  3939  				print("[")
  3940  			}
  3941  			print(t - h)
  3942  			if i == gomaxprocs-1 {
  3943  				print("]\n")
  3944  			}
  3945  		}
  3946  	}
  3947  
  3948  	if !detailed {
  3949  		unlock(&sched.lock)
  3950  		return
  3951  	}
  3952  
  3953  	for mp := allm; mp != nil; mp = mp.alllink {
  3954  		_p_ := mp.p.ptr()
  3955  		gp := mp.curg
  3956  		lockedg := mp.lockedg
  3957  		id1 := int32(-1)
  3958  		if _p_ != nil {
  3959  			id1 = _p_.id
  3960  		}
  3961  		id2 := int64(-1)
  3962  		if gp != nil {
  3963  			id2 = gp.goid
  3964  		}
  3965  		id3 := int64(-1)
  3966  		if lockedg != nil {
  3967  			id3 = lockedg.goid
  3968  		}
  3969  		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
  3970  	}
  3971  
  3972  	lock(&allglock)
  3973  	for gi := 0; gi < len(allgs); gi++ {
  3974  		gp := allgs[gi]
  3975  		mp := gp.m
  3976  		lockedm := gp.lockedm
  3977  		id1 := int32(-1)
  3978  		if mp != nil {
  3979  			id1 = mp.id
  3980  		}
  3981  		id2 := int32(-1)
  3982  		if lockedm != nil {
  3983  			id2 = lockedm.id
  3984  		}
  3985  		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
  3986  	}
  3987  	unlock(&allglock)
  3988  	unlock(&sched.lock)
  3989  }
  3990  
  3991  // Put mp on midle list.
  3992  // Sched must be locked.
  3993  // May run during STW, so write barriers are not allowed.
  3994  //go:nowritebarrierrec
  3995  func mput(mp *m) {
  3996  	mp.schedlink = sched.midle
  3997  	sched.midle.set(mp)
  3998  	sched.nmidle++
  3999  	checkdead()
  4000  }
  4001  
  4002  // Try to get an m from midle list.
  4003  // Sched must be locked.
  4004  // May run during STW, so write barriers are not allowed.
  4005  //go:nowritebarrierrec
  4006  func mget() *m {
  4007  	mp := sched.midle.ptr()
  4008  	if mp != nil {
  4009  		sched.midle = mp.schedlink
  4010  		sched.nmidle--
  4011  	}
  4012  	return mp
  4013  }
  4014  
  4015  // Put gp on the global runnable queue.
  4016  // Sched must be locked.
  4017  // May run during STW, so write barriers are not allowed.
  4018  //go:nowritebarrierrec
  4019  func globrunqput(gp *g) {
  4020  	gp.schedlink = 0
  4021  	if sched.runqtail != 0 {
  4022  		sched.runqtail.ptr().schedlink.set(gp)
  4023  	} else {
  4024  		sched.runqhead.set(gp)
  4025  	}
  4026  	sched.runqtail.set(gp)
  4027  	sched.runqsize++
  4028  }
  4029  
  4030  // Put gp at the head of the global runnable queue.
  4031  // Sched must be locked.
  4032  // May run during STW, so write barriers are not allowed.
  4033  //go:nowritebarrierrec
  4034  func globrunqputhead(gp *g) {
  4035  	gp.schedlink = sched.runqhead
  4036  	sched.runqhead.set(gp)
  4037  	if sched.runqtail == 0 {
  4038  		sched.runqtail.set(gp)
  4039  	}
  4040  	sched.runqsize++
  4041  }
  4042  
  4043  // Put a batch of runnable goroutines on the global runnable queue.
  4044  // Sched must be locked.
  4045  func globrunqputbatch(ghead *g, gtail *g, n int32) {
  4046  	gtail.schedlink = 0
  4047  	if sched.runqtail != 0 {
  4048  		sched.runqtail.ptr().schedlink.set(ghead)
  4049  	} else {
  4050  		sched.runqhead.set(ghead)
  4051  	}
  4052  	sched.runqtail.set(gtail)
  4053  	sched.runqsize += n
  4054  }
  4055  
  4056  // Try to get a batch of G's from the global runnable queue.
  4057  // Sched must be locked.
  4058  func globrunqget(_p_ *p, max int32) *g {
  4059  	if sched.runqsize == 0 {
  4060  		return nil
  4061  	}
  4062  
  4063  	n := sched.runqsize/gomaxprocs + 1
  4064  	if n > sched.runqsize {
  4065  		n = sched.runqsize
  4066  	}
  4067  	if max > 0 && n > max {
  4068  		n = max
  4069  	}
  4070  	if n > int32(len(_p_.runq))/2 {
  4071  		n = int32(len(_p_.runq)) / 2
  4072  	}
  4073  
  4074  	sched.runqsize -= n
  4075  	if sched.runqsize == 0 {
  4076  		sched.runqtail = 0
  4077  	}
  4078  
  4079  	gp := sched.runqhead.ptr()
  4080  	sched.runqhead = gp.schedlink
  4081  	n--
  4082  	for ; n > 0; n-- {
  4083  		gp1 := sched.runqhead.ptr()
  4084  		sched.runqhead = gp1.schedlink
  4085  		runqput(_p_, gp1, false)
  4086  	}
  4087  	return gp
  4088  }
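
        // Editor's addition, worked example: with sched.runqsize == 10,
        // gomaxprocs == 4 and max == 0, n = 10/4 + 1 = 3 (well under half the local
        // queue size), so one G is returned to run immediately and the other two
        // are moved onto _p_'s local run queue.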
  4089  
  4090  // Put p on the _Pidle list.
  4091  // Sched must be locked.
  4092  // May run during STW, so write barriers are not allowed.
  4093  //go:nowritebarrierrec
  4094  func pidleput(_p_ *p) {
  4095  	if !runqempty(_p_) {
  4096  		throw("pidleput: P has non-empty run queue")
  4097  	}
  4098  	_p_.link = sched.pidle
  4099  	sched.pidle.set(_p_)
  4100  	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
  4101  }
  4102  
  4103  // Try to get a p from the _Pidle list.
  4104  // Sched must be locked.
  4105  // May run during STW, so write barriers are not allowed.
  4106  //go:nowritebarrierrec
  4107  func pidleget() *p {
  4108  	_p_ := sched.pidle.ptr()
  4109  	if _p_ != nil {
  4110  		sched.pidle = _p_.link
  4111  		atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
  4112  	}
  4113  	return _p_
  4114  }
  4115  
  4116  // runqempty returns true if _p_ has no Gs on its local run queue.
  4117  // It never returns true spuriously.
  4118  func runqempty(_p_ *p) bool {
  4119  	// Defend against a race where 1) _p_ has G1 in runqnext but runqhead == runqtail,
  4120  	// 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runqnext.
  4121  	// Simply observing that runqhead == runqtail and then observing that runqnext == nil
  4122  	// does not mean the queue is empty.
  4123  	for {
  4124  		head := atomic.Load(&_p_.runqhead)
  4125  		tail := atomic.Load(&_p_.runqtail)
  4126  		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
  4127  		if tail == atomic.Load(&_p_.runqtail) {
  4128  			return head == tail && runnext == 0
  4129  		}
  4130  	}
  4131  }
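
        // Editor's addition, an illustrative interleaving of the race described
        // above (G1 starts in _p_.runnext, ring buffer empty):
        //
        //	observer                      owner P
        //	reads runqhead == runqtail
        //	                              runqput kicks G1 from runnext into runq
        //	                              runqget empties runnext
        //	reads runnext == 0
        //
        // A naive check would now report "empty" even though G1 sits in the ring
        // buffer; re-reading runqtail in the loop above rejects that observation
        // and retries.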
  4132  
  4133  // To shake out latent assumptions about scheduling order,
  4134  // we introduce some randomness into scheduling decisions
  4135  // when running with the race detector.
  4136  // The need for this was made obvious by changing the
  4137  // (deterministic) scheduling order in Go 1.5 and breaking
  4138  // many poorly-written tests.
  4139  // With the randomness here, as long as the tests pass
  4140  // consistently with -race, they shouldn't have latent scheduling
  4141  // assumptions.
  4142  const randomizeScheduler = raceenabled
  4143  
  4144  // runqput tries to put g on the local runnable queue.
  4145  // If next is false, runqput adds g to the tail of the runnable queue.
  4146  // If next is true, runqput puts g in the _p_.runnext slot.
  4147  // If the run queue is full, runqput puts g on the global queue.
  4148  // Executed only by the owner P.
  4149  func runqput(_p_ *p, gp *g, next bool) {
  4150  	if randomizeScheduler && next && fastrand()%2 == 0 {
  4151  		next = false
  4152  	}
  4153  
  4154  	if next {
  4155  	retryNext:
  4156  		oldnext := _p_.runnext
  4157  		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
  4158  			goto retryNext
  4159  		}
  4160  		if oldnext == 0 {
  4161  			return
  4162  		}
  4163  		// Kick the old runnext out to the regular run queue.
  4164  		gp = oldnext.ptr()
  4165  	}
  4166  
  4167  retry:
  4168  	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
  4169  	t := _p_.runqtail
  4170  	if t-h < uint32(len(_p_.runq)) {
  4171  		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
  4172  		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
  4173  		return
  4174  	}
  4175  	if runqputslow(_p_, gp, h, t) {
  4176  		return
  4177  	}
  4178  	// the queue is not full, now the put above must succeed
  4179  	goto retry
  4180  }
  4181  
  4182  // Put g and a batch of work from local runnable queue on global queue.
  4183  // Executed only by the owner P.
  4184  func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
  4185  	var batch [len(_p_.runq)/2 + 1]*g
  4186  
  4187  	// First, grab a batch from local queue.
  4188  	n := t - h
  4189  	n = n / 2
  4190  	if n != uint32(len(_p_.runq)/2) {
  4191  		throw("runqputslow: queue is not full")
  4192  	}
  4193  	for i := uint32(0); i < n; i++ {
  4194  		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
  4195  	}
  4196  	if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  4197  		return false
  4198  	}
  4199  	batch[n] = gp
  4200  
  4201  	if randomizeScheduler {
  4202  		for i := uint32(1); i <= n; i++ {
  4203  			j := fastrand() % (i + 1)
  4204  			batch[i], batch[j] = batch[j], batch[i]
  4205  		}
  4206  	}
  4207  
  4208  	// Link the goroutines.
  4209  	for i := uint32(0); i < n; i++ {
  4210  		batch[i].schedlink.set(batch[i+1])
  4211  	}
  4212  
  4213  	// Now put the batch on global queue.
  4214  	lock(&sched.lock)
  4215  	globrunqputbatch(batch[0], batch[n], int32(n+1))
  4216  	unlock(&sched.lock)
  4217  	return true
  4218  }
  4219  
  4220  // Get g from local runnable queue.
  4221  // If inheritTime is true, gp should inherit the remaining time in the
  4222  // current time slice. Otherwise, it should start a new time slice.
  4223  // Executed only by the owner P.
  4224  func runqget(_p_ *p) (gp *g, inheritTime bool) {
  4225  	// If there's a runnext, it's the next G to run.
  4226  	for {
  4227  		next := _p_.runnext
  4228  		if next == 0 {
  4229  			break
  4230  		}
  4231  		if _p_.runnext.cas(next, 0) {
  4232  			return next.ptr(), true
  4233  		}
  4234  	}
  4235  
  4236  	for {
  4237  		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
  4238  		t := _p_.runqtail
  4239  		if t == h {
  4240  			return nil, false
  4241  		}
  4242  		gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
  4243  		if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
  4244  			return gp, false
  4245  		}
  4246  	}
  4247  }
  4248  
  4249  // Grabs a batch of goroutines from _p_'s runnable queue into batch.
  4250  // Batch is a ring buffer starting at batchHead.
  4251  // Returns number of grabbed goroutines.
  4252  // Can be executed by any P.
  4253  func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
  4254  	for {
  4255  		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
  4256  		t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
  4257  		n := t - h
  4258  		n = n - n/2
  4259  		if n == 0 {
  4260  			if stealRunNextG {
  4261  				// Try to steal from _p_.runnext.
  4262  				if next := _p_.runnext; next != 0 {
  4263  					// Sleep to ensure that _p_ isn't about to run the g we
  4264  					// are about to steal.
  4265  					// The important use case here is when the g running on _p_
  4266  					// ready()s another g and then almost immediately blocks.
  4267  					// Instead of stealing runnext in this window, back off
  4268  					// to give _p_ a chance to schedule runnext. This will avoid
  4269  					// thrashing gs between different Ps.
  4270  					// A sync chan send/recv takes ~50ns as of time of writing,
  4271  					// so 3us gives ~50x overshoot.
  4272  					if GOOS != "windows" {
  4273  						usleep(3)
  4274  					} else {
  4275  						// On windows system timer granularity is 1-15ms,
  4276  						// which is way too much for this optimization.
  4277  						// So just yield.
  4278  						osyield()
  4279  					}
  4280  					if !_p_.runnext.cas(next, 0) {
  4281  						continue
  4282  					}
  4283  					batch[batchHead%uint32(len(batch))] = next
  4284  					return 1
  4285  				}
  4286  			}
  4287  			return 0
  4288  		}
  4289  		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
  4290  			continue
  4291  		}
  4292  		for i := uint32(0); i < n; i++ {
  4293  			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
  4294  			batch[(batchHead+i)%uint32(len(batch))] = g
  4295  		}
  4296  		if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  4297  			return n
  4298  		}
  4299  	}
  4300  }
  4301  
  4302  // Steal half of elements from local runnable queue of p2
  4303  // and put onto local runnable queue of p.
  4304  // Returns one of the stolen elements (or nil if failed).
  4305  func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
  4306  	t := _p_.runqtail
  4307  	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
  4308  	if n == 0 {
  4309  		return nil
  4310  	}
  4311  	n--
  4312  	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
  4313  	if n == 0 {
  4314  		return gp
  4315  	}
  4316  	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
  4317  	if t-h+n >= uint32(len(_p_.runq)) {
  4318  		throw("runqsteal: runq overflow")
  4319  	}
  4320  	atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
  4321  	return gp
  4322  }
  4323  
  4324  //go:linkname setMaxThreads runtime/debug.setMaxThreads
  4325  func setMaxThreads(in int) (out int) {
  4326  	lock(&sched.lock)
  4327  	out = int(sched.maxmcount)
  4328  	if in > 0x7fffffff { // MaxInt32
  4329  		sched.maxmcount = 0x7fffffff
  4330  	} else {
  4331  		sched.maxmcount = int32(in)
  4332  	}
  4333  	checkmcount()
  4334  	unlock(&sched.lock)
  4335  	return
  4336  }
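
        // Editor's addition, illustrative only: this is reached from the public API
        // in runtime/debug, for example:
        //
        //	prev := debug.SetMaxThreads(20000) // returns the previous limit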
  4337  
  4338  func haveexperiment(name string) bool {
  4339  	if name == "framepointer" {
  4340  		return framepointer_enabled // set by linker
  4341  	}
  4342  	x := sys.Goexperiment
  4343  	for x != "" {
  4344  		xname := ""
  4345  		i := index(x, ",")
  4346  		if i < 0 {
  4347  			xname, x = x, ""
  4348  		} else {
  4349  			xname, x = x[:i], x[i+1:]
  4350  		}
  4351  		if xname == name {
  4352  			return true
  4353  		}
  4354  		if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
  4355  			return false
  4356  		}
  4357  	}
  4358  	return false
  4359  }
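
        // Editor's addition, worked example: if sys.Goexperiment were "foo,nobar",
        // the loop above splits it into "foo" and "nobar", so haveexperiment("foo")
        // returns true and haveexperiment("bar") returns false via the "no" prefix
        // check. ("foo" and "bar" are hypothetical experiment names.)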
  4360  
  4361  //go:nosplit
  4362  func procPin() int {
  4363  	_g_ := getg()
  4364  	mp := _g_.m
  4365  
  4366  	mp.locks++
  4367  	return int(mp.p.ptr().id)
  4368  }
  4369  
  4370  //go:nosplit
  4371  func procUnpin() {
  4372  	_g_ := getg()
  4373  	_g_.m.locks--
  4374  }
  4375  
  4376  //go:linkname sync_runtime_procPin sync.runtime_procPin
  4377  //go:nosplit
  4378  func sync_runtime_procPin() int {
  4379  	return procPin()
  4380  }
  4381  
  4382  //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
  4383  //go:nosplit
  4384  func sync_runtime_procUnpin() {
  4385  	procUnpin()
  4386  }
  4387  
  4388  //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
  4389  //go:nosplit
  4390  func sync_atomic_runtime_procPin() int {
  4391  	return procPin()
  4392  }
  4393  
  4394  //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
  4395  //go:nosplit
  4396  func sync_atomic_runtime_procUnpin() {
  4397  	procUnpin()
  4398  }
  4399  
  4400  // Active spinning for sync.Mutex.
  4401  //go:linkname sync_runtime_canSpin sync.runtime_canSpin
  4402  //go:nosplit
  4403  func sync_runtime_canSpin(i int) bool {
  4404  	// sync.Mutex is cooperative, so we are conservative with spinning.
  4405  	// Spin only a few times and only if running on a multicore machine and
  4406  	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
  4407  	// As opposed to runtime mutex we don't do passive spinning here,
  4408  	// because there can be work on the global runq or on other Ps.
  4409  	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
  4410  		return false
  4411  	}
  4412  	if p := getg().m.p.ptr(); !runqempty(p) {
  4413  		return false
  4414  	}
  4415  	return true
  4416  }
  4417  
  4418  //go:linkname sync_runtime_doSpin sync.runtime_doSpin
  4419  //go:nosplit
  4420  func sync_runtime_doSpin() {
  4421  	procyield(active_spin_cnt)
  4422  }
  4423  
  4424  var stealOrder randomOrder
  4425  
  4426  // randomOrder/randomEnum are helper types for randomized work stealing.
  4427  // They allow enumerating all Ps in different pseudo-random orders without repetitions.
  4428  // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
  4429  // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
  4430  type randomOrder struct {
  4431  	count    uint32
  4432  	coprimes []uint32
  4433  }
  4434  
  4435  type randomEnum struct {
  4436  	i     uint32
  4437  	count uint32
  4438  	pos   uint32
  4439  	inc   uint32
  4440  }
  4441  
  4442  func (ord *randomOrder) reset(count uint32) {
  4443  	ord.count = count
  4444  	ord.coprimes = ord.coprimes[:0]
  4445  	for i := uint32(1); i <= count; i++ {
  4446  		if gcd(i, count) == 1 {
  4447  			ord.coprimes = append(ord.coprimes, i)
  4448  		}
  4449  	}
  4450  }
  4451  
  4452  func (ord *randomOrder) start(i uint32) randomEnum {
  4453  	return randomEnum{
  4454  		count: ord.count,
  4455  		pos:   i % ord.count,
  4456  		inc:   ord.coprimes[i%uint32(len(ord.coprimes))],
  4457  	}
  4458  }
  4459  
  4460  func (enum *randomEnum) done() bool {
  4461  	return enum.i == enum.count
  4462  }
  4463  
  4464  func (enum *randomEnum) next() {
  4465  	enum.i++
  4466  	enum.pos = (enum.pos + enum.inc) % enum.count
  4467  }
  4468  
  4469  func (enum *randomEnum) position() uint32 {
  4470  	return enum.pos
  4471  }
  4472  
  4473  func gcd(a, b uint32) uint32 {
  4474  	for b != 0 {
  4475  		a, b = b, a%b
  4476  	}
  4477  	return a
  4478  }
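
        // Editor's addition, worked example for randomOrder above: with 4 Ps,
        // reset(4) records coprimes {1, 3}. start(6) then picks pos = 6%4 = 2 and
        // inc = coprimes[6%2] = 1, so the enumeration visits P2, P3, P0, P1:
        // each P exactly once, in an order that varies with the caller's random i.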