github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/runtime/proc.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
     9  // Goroutine scheduler
    10  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    11  //
    12  // The main concepts are:
    13  // G - goroutine.
    14  // M - worker thread, or machine.
    15  // P - processor, a resource that is required to execute Go code.
    16  //     M must have an associated P to execute Go code, however it can be
    17  //     blocked or in a syscall w/o an associated P.
    18  //
    19  // Design doc at https://golang.org/s/go11sched.
    20  
    21  var (
    22  	m0 m
    23  	g0 g
    24  )
    25  
    26  //go:linkname runtime_init runtime.init
    27  func runtime_init()
    28  
    29  //go:linkname main_init main.init
    30  func main_init()
    31  
    32  // main_init_done is a signal used by cgocallbackg that initialization
    33  // has been completed. It is made before _cgo_notify_runtime_init_done,
    34  // so all cgo calls can rely on it existing. When main_init is complete,
    35  // it is closed, meaning cgocallbackg can reliably receive from it.
    36  var main_init_done chan bool
    37  
    38  //go:linkname main_main main.main
    39  func main_main()
    40  
    41  // runtimeInitTime is the nanotime() at which the runtime started.
    42  var runtimeInitTime int64
    43  
    44  // The main goroutine.
    45  func main() {
    46  	g := getg()
    47  
    48  	// Racectx of m0->g0 is used only as the parent of the main goroutine.
    49  	// It must not be used for anything else.
    50  	g.m.g0.racectx = 0
    51  
    52  	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
    53  	// Using decimal instead of binary GB and MB because
    54  	// they look nicer in the stack overflow failure message.
    55  	if ptrSize == 8 {
    56  		maxstacksize = 1000000000
    57  	} else {
    58  		maxstacksize = 250000000
    59  	}
    60  
    61  	// Record when the world started.
    62  	runtimeInitTime = nanotime()
    63  
    64  	systemstack(func() {
    65  		newm(sysmon, nil)
    66  	})
    67  
    68  	// Lock the main goroutine onto this, the main OS thread,
    69  	// during initialization.  Most programs won't care, but a few
    70  	// do require certain calls to be made by the main thread.
    71  	// Those can arrange for main.main to run in the main thread
    72  	// by calling runtime.LockOSThread during initialization
    73  	// to preserve the lock.
    74  	lockOSThread()
    75  
    76  	if g.m != &m0 {
    77  		throw("runtime.main not on m0")
    78  	}
    79  
    80  	runtime_init() // must be before defer
    81  
    82  	// Defer unlock so that runtime.Goexit during init does the unlock too.
    83  	needUnlock := true
    84  	defer func() {
    85  		if needUnlock {
    86  			unlockOSThread()
    87  		}
    88  	}()
    89  
    90  	gcenable()
    91  
    92  	main_init_done = make(chan bool)
    93  	if iscgo {
    94  		if _cgo_thread_start == nil {
    95  			throw("_cgo_thread_start missing")
    96  		}
    97  		if _cgo_malloc == nil {
    98  			throw("_cgo_malloc missing")
    99  		}
   100  		if _cgo_free == nil {
   101  			throw("_cgo_free missing")
   102  		}
   103  		if GOOS != "windows" {
   104  			if _cgo_setenv == nil {
   105  				throw("_cgo_setenv missing")
   106  			}
   107  			if _cgo_unsetenv == nil {
   108  				throw("_cgo_unsetenv missing")
   109  			}
   110  		}
   111  		if _cgo_notify_runtime_init_done == nil {
   112  			throw("_cgo_notify_runtime_init_done missing")
   113  		}
   114  		cgocall(_cgo_notify_runtime_init_done, nil)
   115  	}
   116  
   117  	main_init()
   118  	close(main_init_done)
   119  
   120  	needUnlock = false
   121  	unlockOSThread()
   122  
   123  	if isarchive || islibrary {
   124  		// A program compiled with -buildmode=c-archive or c-shared
   125  		// has a main, but it is not executed.
   126  		return
   127  	}
   128  	main_main()
   129  	if raceenabled {
   130  		racefini()
   131  	}
   132  
   133  	// Make racy client program work: if panicking on
   134  	// another goroutine at the same time as main returns,
   135  	// let the other goroutine finish printing the panic trace.
   136  	// Once it does, it will exit. See issue 3934.
   137  	if panicking != 0 {
   138  		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
   139  	}
   140  
   141  	exit(0)
   142  	for {
   143  		var x *int32
   144  		*x = 0
   145  	}
   146  }
   147  
   148  // os_beforeExit is called from os.Exit(0).
   149  //go:linkname os_beforeExit os.runtime_beforeExit
   150  func os_beforeExit() {
   151  	if raceenabled {
   152  		racefini()
   153  	}
   154  }
   155  
   156  // start forcegc helper goroutine
   157  func init() {
   158  	go forcegchelper()
   159  }
   160  
   161  func forcegchelper() {
   162  	forcegc.g = getg()
   163  	for {
   164  		lock(&forcegc.lock)
   165  		if forcegc.idle != 0 {
   166  			throw("forcegc: phase error")
   167  		}
   168  		atomicstore(&forcegc.idle, 1)
   169  		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
   170  		// this goroutine is explicitly resumed by sysmon
   171  		if debug.gctrace > 0 {
   172  			println("GC forced")
   173  		}
   174  		startGC(gcBackgroundMode, true)
   175  	}
   176  }
   177  
   178  //go:nosplit
   179  
   180  // Gosched yields the processor, allowing other goroutines to run.  It does not
   181  // suspend the current goroutine, so execution resumes automatically.
   182  func Gosched() {
   183  	mcall(gosched_m)
   184  }
   185  
   186  // Puts the current goroutine into a waiting state and calls unlockf.
   187  // If unlockf returns false, the goroutine is resumed.
   188  func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
   189  	mp := acquirem()
   190  	gp := mp.curg
   191  	status := readgstatus(gp)
   192  	if status != _Grunning && status != _Gscanrunning {
   193  		throw("gopark: bad g status")
   194  	}
   195  	mp.waitlock = lock
   196  	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
   197  	gp.waitreason = reason
   198  	mp.waittraceev = traceEv
   199  	mp.waittraceskip = traceskip
   200  	releasem(mp)
   201  	// can't do anything that might move the G between Ms here.
   202  	mcall(park_m)
   203  }
   204  
   205  // Puts the current goroutine into a waiting state and unlocks the lock.
   206  // The goroutine can be made runnable again by calling goready(gp).
   207  func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
   208  	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
   209  }
   210  
   211  func goready(gp *g, traceskip int) {
   212  	systemstack(func() {
   213  		ready(gp, traceskip)
   214  	})
   215  }
   216  
   217  //go:nosplit
   218  func acquireSudog() *sudog {
   219  	// Delicate dance: the semaphore implementation calls
   220  	// acquireSudog, acquireSudog calls new(sudog),
   221  	// new calls malloc, malloc can call the garbage collector,
   222  	// and the garbage collector calls the semaphore implementation
   223  	// in stopTheWorld.
   224  	// Break the cycle by doing acquirem/releasem around new(sudog).
   225  	// The acquirem/releasem increments m.locks during new(sudog),
   226  	// which keeps the garbage collector from being invoked.
   227  	mp := acquirem()
   228  	pp := mp.p.ptr()
   229  	if len(pp.sudogcache) == 0 {
   230  		lock(&sched.sudoglock)
   231  		// First, try to grab a batch from central cache.
   232  		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
   233  			s := sched.sudogcache
   234  			sched.sudogcache = s.next
   235  			s.next = nil
   236  			pp.sudogcache = append(pp.sudogcache, s)
   237  		}
   238  		unlock(&sched.sudoglock)
   239  		// If the central cache is empty, allocate a new one.
   240  		if len(pp.sudogcache) == 0 {
   241  			pp.sudogcache = append(pp.sudogcache, new(sudog))
   242  		}
   243  	}
   244  	n := len(pp.sudogcache)
   245  	s := pp.sudogcache[n-1]
   246  	pp.sudogcache[n-1] = nil
   247  	pp.sudogcache = pp.sudogcache[:n-1]
   248  	if s.elem != nil {
   249  		throw("acquireSudog: found s.elem != nil in cache")
   250  	}
   251  	releasem(mp)
   252  	return s
   253  }
   254  
   255  //go:nosplit
   256  func releaseSudog(s *sudog) {
   257  	if s.elem != nil {
   258  		throw("runtime: sudog with non-nil elem")
   259  	}
   260  	if s.selectdone != nil {
   261  		throw("runtime: sudog with non-nil selectdone")
   262  	}
   263  	if s.next != nil {
   264  		throw("runtime: sudog with non-nil next")
   265  	}
   266  	if s.prev != nil {
   267  		throw("runtime: sudog with non-nil prev")
   268  	}
   269  	if s.waitlink != nil {
   270  		throw("runtime: sudog with non-nil waitlink")
   271  	}
   272  	gp := getg()
   273  	if gp.param != nil {
   274  		throw("runtime: releaseSudog with non-nil gp.param")
   275  	}
   276  	mp := acquirem() // avoid rescheduling to another P
   277  	pp := mp.p.ptr()
   278  	if len(pp.sudogcache) == cap(pp.sudogcache) {
   279  		// Transfer half of local cache to the central cache.
   280  		var first, last *sudog
   281  		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
   282  			n := len(pp.sudogcache)
   283  			p := pp.sudogcache[n-1]
   284  			pp.sudogcache[n-1] = nil
   285  			pp.sudogcache = pp.sudogcache[:n-1]
   286  			if first == nil {
   287  				first = p
   288  			} else {
   289  				last.next = p
   290  			}
   291  			last = p
   292  		}
   293  		lock(&sched.sudoglock)
   294  		last.next = sched.sudogcache
   295  		sched.sudogcache = first
   296  		unlock(&sched.sudoglock)
   297  	}
   298  	pp.sudogcache = append(pp.sudogcache, s)
   299  	releasem(mp)
   300  }
   301  
   302  // funcPC returns the entry PC of the function f.
   303  // It assumes that f is a func value. Otherwise the behavior is undefined.
   304  //go:nosplit
   305  func funcPC(f interface{}) uintptr {
   306  	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
   307  }
   308  
   309  // called from assembly
   310  func badmcall(fn func(*g)) {
   311  	throw("runtime: mcall called on m->g0 stack")
   312  }
   313  
   314  func badmcall2(fn func(*g)) {
   315  	throw("runtime: mcall function returned")
   316  }
   317  
   318  func badreflectcall() {
   319  	panic("runtime: arg size to reflect.call more than 1GB")
   320  }
   321  
   322  func lockedOSThread() bool {
   323  	gp := getg()
   324  	return gp.lockedm != nil && gp.m.lockedg != nil
   325  }
   326  
   327  var (
   328  	allgs    []*g
   329  	allglock mutex
   330  )
   331  
   332  func allgadd(gp *g) {
   333  	if readgstatus(gp) == _Gidle {
   334  		throw("allgadd: bad status Gidle")
   335  	}
   336  
   337  	lock(&allglock)
   338  	allgs = append(allgs, gp)
   339  	allglen = uintptr(len(allgs))
   340  	unlock(&allglock)
   341  }
   342  
   343  const (
   344  	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
   345  	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
   346  	_GoidCacheBatch = 16
   347  )
   348  
   349  // The bootstrap sequence is:
   350  //
   351  //	call osinit
   352  //	call schedinit
   353  //	make & queue new G
   354  //	call runtime·mstart
   355  //
   356  // The new G calls runtime·main.
   357  func schedinit() {
   358  	// raceinit must be the first call to race detector.
   359  	// In particular, it must be done before mallocinit below calls racemapshadow.
   360  	_g_ := getg()
   361  	if raceenabled {
   362  		_g_.racectx = raceinit()
   363  	}
   364  
   365  	sched.maxmcount = 10000
   366  
   367  	// Cache the framepointer experiment.  This affects stack unwinding.
   368  	framepointer_enabled = haveexperiment("framepointer")
   369  
   370  	tracebackinit()
   371  	moduledataverify()
   372  	stackinit()
   373  	mallocinit()
   374  	mcommoninit(_g_.m)
   375  
   376  	goargs()
   377  	goenvs()
   378  	parsedebugvars()
   379  	gcinit()
   380  
   381  	sched.lastpoll = uint64(nanotime())
   382  	procs := int(ncpu)
   383  	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
   384  		if n > _MaxGomaxprocs {
   385  			n = _MaxGomaxprocs
   386  		}
   387  		procs = n
   388  	}
   389  	if procresize(int32(procs)) != nil {
   390  		throw("unknown runnable goroutine during bootstrap")
   391  	}
   392  
   393  	if buildVersion == "" {
   394  		// Condition should never trigger.  This code just serves
   395  		// to ensure runtime·buildVersion is kept in the resulting binary.
   396  		buildVersion = "unknown"
   397  	}
   398  }
   399  
   400  func dumpgstatus(gp *g) {
   401  	_g_ := getg()
   402  	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   403  	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
   404  }
   405  
   406  func checkmcount() {
   407  	// sched lock is held
   408  	if sched.mcount > sched.maxmcount {
   409  		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
   410  		throw("thread exhaustion")
   411  	}
   412  }
   413  
   414  func mcommoninit(mp *m) {
   415  	_g_ := getg()
   416  
   417  	// g0 stack won't make sense for user (and is not necessary unwindable).
   418  	if _g_ != _g_.m.g0 {
   419  		callers(1, mp.createstack[:])
   420  	}
   421  
   422  	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
   423  	if mp.fastrand == 0 {
   424  		mp.fastrand = 0x49f6428a
   425  	}
   426  
   427  	lock(&sched.lock)
   428  	mp.id = sched.mcount
   429  	sched.mcount++
   430  	checkmcount()
   431  	mpreinit(mp)
   432  	if mp.gsignal != nil {
   433  		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
   434  	}
   435  
   436  	// Add to allm so garbage collector doesn't free g->m
   437  	// when it is just in a register or thread-local storage.
   438  	mp.alllink = allm
   439  
   440  	// NumCgoCall() iterates over allm w/o schedlock,
   441  	// so we need to publish it safely.
   442  	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
   443  	unlock(&sched.lock)
   444  }
   445  
   446  // Mark gp ready to run.
   447  func ready(gp *g, traceskip int) {
   448  	if trace.enabled {
   449  		traceGoUnpark(gp, traceskip)
   450  	}
   451  
   452  	status := readgstatus(gp)
   453  
   454  	// Mark runnable.
   455  	_g_ := getg()
   456  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
   457  	if status&^_Gscan != _Gwaiting {
   458  		dumpgstatus(gp)
   459  		throw("bad g->status in ready")
   460  	}
   461  
   462  	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   463  	casgstatus(gp, _Gwaiting, _Grunnable)
   464  	runqput(_g_.m.p.ptr(), gp, true)
   465  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
   466  		wakep()
   467  	}
   468  	_g_.m.locks--
   469  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   470  		_g_.stackguard0 = stackPreempt
   471  	}
   472  }
   473  
   474  func gcprocs() int32 {
   475  	// Figure out how many CPUs to use during GC.
   476  	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
   477  	lock(&sched.lock)
   478  	n := gomaxprocs
   479  	if n > ncpu {
   480  		n = ncpu
   481  	}
   482  	if n > _MaxGcproc {
   483  		n = _MaxGcproc
   484  	}
   485  	if n > sched.nmidle+1 { // one M is currently running
   486  		n = sched.nmidle + 1
   487  	}
   488  	unlock(&sched.lock)
   489  	return n
   490  }
   491  
   492  func needaddgcproc() bool {
   493  	lock(&sched.lock)
   494  	n := gomaxprocs
   495  	if n > ncpu {
   496  		n = ncpu
   497  	}
   498  	if n > _MaxGcproc {
   499  		n = _MaxGcproc
   500  	}
   501  	n -= sched.nmidle + 1 // one M is currently running
   502  	unlock(&sched.lock)
   503  	return n > 0
   504  }
   505  
   506  func helpgc(nproc int32) {
   507  	_g_ := getg()
   508  	lock(&sched.lock)
   509  	pos := 0
   510  	for n := int32(1); n < nproc; n++ { // one M is currently running
   511  		if allp[pos].mcache == _g_.m.mcache {
   512  			pos++
   513  		}
   514  		mp := mget()
   515  		if mp == nil {
   516  			throw("gcprocs inconsistency")
   517  		}
   518  		mp.helpgc = n
   519  		mp.p.set(allp[pos])
   520  		mp.mcache = allp[pos].mcache
   521  		pos++
   522  		notewakeup(&mp.park)
   523  	}
   524  	unlock(&sched.lock)
   525  }
   526  
   527  // freezeStopWait is a large value that freezetheworld sets
   528  // sched.stopwait to in order to request that all Gs permanently stop.
   529  const freezeStopWait = 0x7fffffff
   530  
   531  // Similar to stopTheWorld but best-effort and can be called several times.
   532  // There is no reverse operation, used during crashing.
   533  // This function must not lock any mutexes.
   534  func freezetheworld() {
   535  	// stopwait and preemption requests can be lost
   536  	// due to races with concurrently executing threads,
   537  	// so try several times
   538  	for i := 0; i < 5; i++ {
   539  		// this should tell the scheduler to not start any new goroutines
   540  		sched.stopwait = freezeStopWait
   541  		atomicstore(&sched.gcwaiting, 1)
   542  		// this should stop running goroutines
   543  		if !preemptall() {
   544  			break // no running goroutines
   545  		}
   546  		usleep(1000)
   547  	}
   548  	// to be sure
   549  	usleep(1000)
   550  	preemptall()
   551  	usleep(1000)
   552  }
   553  
   554  func isscanstatus(status uint32) bool {
   555  	if status == _Gscan {
   556  		throw("isscanstatus: Bad status Gscan")
   557  	}
   558  	return status&_Gscan == _Gscan
   559  }
   560  
   561  // All reads and writes of g's status go through readgstatus, casgstatus
   562  // castogscanstatus, casfrom_Gscanstatus.
   563  //go:nosplit
   564  func readgstatus(gp *g) uint32 {
   565  	return atomicload(&gp.atomicstatus)
   566  }
   567  
   568  // Ownership of gscanvalid:
   569  //
   570  // If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
   571  // then gp owns gp.gscanvalid, and other goroutines must not modify it.
   572  //
   573  // Otherwise, a second goroutine can lock the scan state by setting _Gscan
   574  // in the status bit and then modify gscanvalid, and then unlock the scan state.
   575  //
   576  // Note that the first condition implies an exception to the second:
   577  // if a second goroutine changes gp's status to _Grunning|_Gscan,
   578  // that second goroutine still does not have the right to modify gscanvalid.
   579  
   580  // The Gscanstatuses are acting like locks and this releases them.
   581  // If it proves to be a performance hit we should be able to make these
   582  // simple atomic stores but for now we are going to throw if
   583  // we see an inconsistent state.
   584  func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
   585  	success := false
   586  
   587  	// Check that transition is valid.
   588  	switch oldval {
   589  	default:
   590  		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   591  		dumpgstatus(gp)
   592  		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
   593  	case _Gscanrunnable,
   594  		_Gscanwaiting,
   595  		_Gscanrunning,
   596  		_Gscansyscall:
   597  		if newval == oldval&^_Gscan {
   598  			success = cas(&gp.atomicstatus, oldval, newval)
   599  		}
   600  	case _Gscanenqueue:
   601  		if newval == _Gwaiting {
   602  			success = cas(&gp.atomicstatus, oldval, newval)
   603  		}
   604  	}
   605  	if !success {
   606  		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   607  		dumpgstatus(gp)
   608  		throw("casfrom_Gscanstatus: gp->status is not in scan state")
   609  	}
   610  	if newval == _Grunning {
   611  		gp.gcscanvalid = false
   612  	}
   613  }
   614  
   615  // This will return false if the gp is not in the expected status and the cas fails.
   616  // This acts like a lock acquire while the casfromgstatus acts like a lock release.
   617  func castogscanstatus(gp *g, oldval, newval uint32) bool {
   618  	switch oldval {
   619  	case _Grunnable,
   620  		_Gwaiting,
   621  		_Gsyscall:
   622  		if newval == oldval|_Gscan {
   623  			return cas(&gp.atomicstatus, oldval, newval)
   624  		}
   625  	case _Grunning:
   626  		if newval == _Gscanrunning || newval == _Gscanenqueue {
   627  			return cas(&gp.atomicstatus, oldval, newval)
   628  		}
   629  	}
   630  	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
   631  	throw("castogscanstatus")
   632  	panic("not reached")
   633  }
   634  
   635  // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
   636  // and casfrom_Gscanstatus instead.
   637  // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
   638  // put it in the Gscan state is finished.
   639  //go:nosplit
   640  func casgstatus(gp *g, oldval, newval uint32) {
   641  	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
   642  		systemstack(func() {
   643  			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
   644  			throw("casgstatus: bad incoming values")
   645  		})
   646  	}
   647  
   648  	if oldval == _Grunning && gp.gcscanvalid {
   649  		// If oldvall == _Grunning, then the actual status must be
   650  		// _Grunning or _Grunning|_Gscan; either way,
   651  		// we own gp.gcscanvalid, so it's safe to read.
   652  		// gp.gcscanvalid must not be true when we are running.
   653  		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
   654  		throw("casgstatus")
   655  	}
   656  
   657  	// loop if gp->atomicstatus is in a scan state giving
   658  	// GC time to finish and change the state to oldval.
   659  	for !cas(&gp.atomicstatus, oldval, newval) {
   660  		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
   661  			systemstack(func() {
   662  				throw("casgstatus: waiting for Gwaiting but is Grunnable")
   663  			})
   664  		}
   665  		// Help GC if needed.
   666  		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
   667  		// 	gp.preemptscan = false
   668  		// 	systemstack(func() {
   669  		// 		gcphasework(gp)
   670  		// 	})
   671  		// }
   672  	}
   673  	if newval == _Grunning {
   674  		gp.gcscanvalid = false
   675  	}
   676  }
   677  
   678  // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
   679  // Returns old status. Cannot call casgstatus directly, because we are racing with an
   680  // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
   681  // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
   682  // it would loop waiting for the status to go back to Gwaiting, which it never will.
   683  //go:nosplit
   684  func casgcopystack(gp *g) uint32 {
   685  	for {
   686  		oldstatus := readgstatus(gp) &^ _Gscan
   687  		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
   688  			throw("copystack: bad status, not Gwaiting or Grunnable")
   689  		}
   690  		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
   691  			return oldstatus
   692  		}
   693  	}
   694  }
   695  
   696  // scang blocks until gp's stack has been scanned.
   697  // It might be scanned by scang or it might be scanned by the goroutine itself.
   698  // Either way, the stack scan has completed when scang returns.
   699  func scang(gp *g) {
   700  	// Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
   701  	// Nothing is racing with us now, but gcscandone might be set to true left over
   702  	// from an earlier round of stack scanning (we scan twice per GC).
   703  	// We use gcscandone to record whether the scan has been done during this round.
   704  	// It is important that the scan happens exactly once: if called twice,
   705  	// the installation of stack barriers will detect the double scan and die.
   706  
   707  	gp.gcscandone = false
   708  
   709  	// Endeavor to get gcscandone set to true,
   710  	// either by doing the stack scan ourselves or by coercing gp to scan itself.
   711  	// gp.gcscandone can transition from false to true when we're not looking
   712  	// (if we asked for preemption), so any time we lock the status using
   713  	// castogscanstatus we have to double-check that the scan is still not done.
   714  	for !gp.gcscandone {
   715  		switch s := readgstatus(gp); s {
   716  		default:
   717  			dumpgstatus(gp)
   718  			throw("stopg: invalid status")
   719  
   720  		case _Gdead:
   721  			// No stack.
   722  			gp.gcscandone = true
   723  
   724  		case _Gcopystack:
   725  		// Stack being switched. Go around again.
   726  
   727  		case _Grunnable, _Gsyscall, _Gwaiting:
   728  			// Claim goroutine by setting scan bit.
   729  			// Racing with execution or readying of gp.
   730  			// The scan bit keeps them from running
   731  			// the goroutine until we're done.
   732  			if castogscanstatus(gp, s, s|_Gscan) {
   733  				if !gp.gcscandone {
   734  					// Coordinate with traceback
   735  					// in sigprof.
   736  					for !cas(&gp.stackLock, 0, 1) {
   737  						osyield()
   738  					}
   739  					scanstack(gp)
   740  					atomicstore(&gp.stackLock, 0)
   741  					gp.gcscandone = true
   742  				}
   743  				restartg(gp)
   744  			}
   745  
   746  		case _Gscanwaiting:
   747  		// newstack is doing a scan for us right now. Wait.
   748  
   749  		case _Grunning:
   750  			// Goroutine running. Try to preempt execution so it can scan itself.
   751  			// The preemption handler (in newstack) does the actual scan.
   752  
   753  			// Optimization: if there is already a pending preemption request
   754  			// (from the previous loop iteration), don't bother with the atomics.
   755  			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
   756  				break
   757  			}
   758  
   759  			// Ask for preemption and self scan.
   760  			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
   761  				if !gp.gcscandone {
   762  					gp.preemptscan = true
   763  					gp.preempt = true
   764  					gp.stackguard0 = stackPreempt
   765  				}
   766  				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
   767  			}
   768  		}
   769  	}
   770  
   771  	gp.preemptscan = false // cancel scan request if no longer needed
   772  }
   773  
   774  // The GC requests that this routine be moved from a scanmumble state to a mumble state.
   775  func restartg(gp *g) {
   776  	s := readgstatus(gp)
   777  	switch s {
   778  	default:
   779  		dumpgstatus(gp)
   780  		throw("restartg: unexpected status")
   781  
   782  	case _Gdead:
   783  	// ok
   784  
   785  	case _Gscanrunnable,
   786  		_Gscanwaiting,
   787  		_Gscansyscall:
   788  		casfrom_Gscanstatus(gp, s, s&^_Gscan)
   789  
   790  	// Scan is now completed.
   791  	// Goroutine now needs to be made runnable.
   792  	// We put it on the global run queue; ready blocks on the global scheduler lock.
   793  	case _Gscanenqueue:
   794  		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
   795  		if gp != getg().m.curg {
   796  			throw("processing Gscanenqueue on wrong m")
   797  		}
   798  		dropg()
   799  		ready(gp, 0)
   800  	}
   801  }
   802  
   803  // stopTheWorld stops all P's from executing goroutines, interrupting
   804  // all goroutines at GC safe points and records reason as the reason
   805  // for the stop. On return, only the current goroutine's P is running.
   806  // stopTheWorld must not be called from a system stack and the caller
   807  // must not hold worldsema. The caller must call startTheWorld when
   808  // other P's should resume execution.
   809  //
   810  // stopTheWorld is safe for multiple goroutines to call at the
   811  // same time. Each will execute its own stop, and the stops will
   812  // be serialized.
   813  //
   814  // This is also used by routines that do stack dumps. If the system is
   815  // in panic or being exited, this may not reliably stop all
   816  // goroutines.
   817  func stopTheWorld(reason string) {
   818  	semacquire(&worldsema, false)
   819  	getg().m.preemptoff = reason
   820  	systemstack(stopTheWorldWithSema)
   821  }
   822  
   823  // startTheWorld undoes the effects of stopTheWorld.
   824  func startTheWorld() {
   825  	systemstack(startTheWorldWithSema)
   826  	// worldsema must be held over startTheWorldWithSema to ensure
   827  	// gomaxprocs cannot change while worldsema is held.
   828  	semrelease(&worldsema)
   829  	getg().m.preemptoff = ""
   830  }
   831  
   832  // Holding worldsema grants an M the right to try to stop the world
   833  // and prevents gomaxprocs from changing concurrently.
   834  var worldsema uint32 = 1
   835  
   836  // stopTheWorldWithSema is the core implementation of stopTheWorld.
   837  // The caller is responsible for acquiring worldsema and disabling
   838  // preemption first and then should stopTheWorldWithSema on the system
   839  // stack:
   840  //
   841  //	semacquire(&worldsema, false)
   842  //	m.preemptoff = "reason"
   843  //	systemstack(stopTheWorldWithSema)
   844  //
   845  // When finished, the caller must either call startTheWorld or undo
   846  // these three operations separately:
   847  //
   848  //	m.preemptoff = ""
   849  //	systemstack(startTheWorldWithSema)
   850  //	semrelease(&worldsema)
   851  //
   852  // It is allowed to acquire worldsema once and then execute multiple
   853  // startTheWorldWithSema/stopTheWorldWithSema pairs.
   854  // Other P's are able to execute between successive calls to
   855  // startTheWorldWithSema and stopTheWorldWithSema.
   856  // Holding worldsema causes any other goroutines invoking
   857  // stopTheWorld to block.
   858  func stopTheWorldWithSema() {
   859  	_g_ := getg()
   860  
   861  	// If we hold a lock, then we won't be able to stop another M
   862  	// that is blocked trying to acquire the lock.
   863  	if _g_.m.locks > 0 {
   864  		throw("stopTheWorld: holding locks")
   865  	}
   866  
   867  	lock(&sched.lock)
   868  	sched.stopwait = gomaxprocs
   869  	atomicstore(&sched.gcwaiting, 1)
   870  	preemptall()
   871  	// stop current P
   872  	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
   873  	sched.stopwait--
   874  	// try to retake all P's in Psyscall status
   875  	for i := 0; i < int(gomaxprocs); i++ {
   876  		p := allp[i]
   877  		s := p.status
   878  		if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
   879  			if trace.enabled {
   880  				traceGoSysBlock(p)
   881  				traceProcStop(p)
   882  			}
   883  			p.syscalltick++
   884  			sched.stopwait--
   885  		}
   886  	}
   887  	// stop idle P's
   888  	for {
   889  		p := pidleget()
   890  		if p == nil {
   891  			break
   892  		}
   893  		p.status = _Pgcstop
   894  		sched.stopwait--
   895  	}
   896  	wait := sched.stopwait > 0
   897  	unlock(&sched.lock)
   898  
   899  	// wait for remaining P's to stop voluntarily
   900  	if wait {
   901  		for {
   902  			// wait for 100us, then try to re-preempt in case of any races
   903  			if notetsleep(&sched.stopnote, 100*1000) {
   904  				noteclear(&sched.stopnote)
   905  				break
   906  			}
   907  			preemptall()
   908  		}
   909  	}
   910  	if sched.stopwait != 0 {
   911  		throw("stopTheWorld: not stopped")
   912  	}
   913  	for i := 0; i < int(gomaxprocs); i++ {
   914  		p := allp[i]
   915  		if p.status != _Pgcstop {
   916  			throw("stopTheWorld: not stopped")
   917  		}
   918  	}
   919  }
   920  
   921  func mhelpgc() {
   922  	_g_ := getg()
   923  	_g_.m.helpgc = -1
   924  }
   925  
   926  func startTheWorldWithSema() {
   927  	_g_ := getg()
   928  
   929  	_g_.m.locks++        // disable preemption because it can be holding p in a local var
   930  	gp := netpoll(false) // non-blocking
   931  	injectglist(gp)
   932  	add := needaddgcproc()
   933  	lock(&sched.lock)
   934  
   935  	procs := gomaxprocs
   936  	if newprocs != 0 {
   937  		procs = newprocs
   938  		newprocs = 0
   939  	}
   940  	p1 := procresize(procs)
   941  	sched.gcwaiting = 0
   942  	if sched.sysmonwait != 0 {
   943  		sched.sysmonwait = 0
   944  		notewakeup(&sched.sysmonnote)
   945  	}
   946  	unlock(&sched.lock)
   947  
   948  	for p1 != nil {
   949  		p := p1
   950  		p1 = p1.link.ptr()
   951  		if p.m != 0 {
   952  			mp := p.m.ptr()
   953  			p.m = 0
   954  			if mp.nextp != 0 {
   955  				throw("startTheWorld: inconsistent mp->nextp")
   956  			}
   957  			mp.nextp.set(p)
   958  			notewakeup(&mp.park)
   959  		} else {
   960  			// Start M to run P.  Do not start another M below.
   961  			newm(nil, p)
   962  			add = false
   963  		}
   964  	}
   965  
   966  	// Wakeup an additional proc in case we have excessive runnable goroutines
   967  	// in local queues or in the global queue. If we don't, the proc will park itself.
   968  	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
   969  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
   970  		wakep()
   971  	}
   972  
   973  	if add {
   974  		// If GC could have used another helper proc, start one now,
   975  		// in the hope that it will be available next time.
   976  		// It would have been even better to start it before the collection,
   977  		// but doing so requires allocating memory, so it's tricky to
   978  		// coordinate.  This lazy approach works out in practice:
   979  		// we don't mind if the first couple gc rounds don't have quite
   980  		// the maximum number of procs.
   981  		newm(mhelpgc, nil)
   982  	}
   983  	_g_.m.locks--
   984  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   985  		_g_.stackguard0 = stackPreempt
   986  	}
   987  }
   988  
   989  // Called to start an M.
   990  //go:nosplit
   991  func mstart() {
   992  	_g_ := getg()
   993  
   994  	if _g_.stack.lo == 0 {
   995  		// Initialize stack bounds from system stack.
   996  		// Cgo may have left stack size in stack.hi.
   997  		size := _g_.stack.hi
   998  		if size == 0 {
   999  			size = 8192 * stackGuardMultiplier
  1000  		}
  1001  		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
  1002  		_g_.stack.lo = _g_.stack.hi - size + 1024
  1003  	}
  1004  	// Initialize stack guards so that we can start calling
  1005  	// both Go and C functions with stack growth prologues.
  1006  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1007  	_g_.stackguard1 = _g_.stackguard0
  1008  	mstart1()
  1009  }
  1010  
  1011  func mstart1() {
  1012  	_g_ := getg()
  1013  
  1014  	if _g_ != _g_.m.g0 {
  1015  		throw("bad runtime·mstart")
  1016  	}
  1017  
  1018  	// Record top of stack for use by mcall.
  1019  	// Once we call schedule we're never coming back,
  1020  	// so other calls can reuse this stack space.
  1021  	gosave(&_g_.m.g0.sched)
  1022  	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
  1023  	asminit()
  1024  	minit()
  1025  
  1026  	// Install signal handlers; after minit so that minit can
  1027  	// prepare the thread to be able to handle the signals.
  1028  	if _g_.m == &m0 {
  1029  		// Create an extra M for callbacks on threads not created by Go.
  1030  		if iscgo && !cgoHasExtraM {
  1031  			cgoHasExtraM = true
  1032  			newextram()
  1033  		}
  1034  		initsig()
  1035  	}
  1036  
  1037  	if fn := _g_.m.mstartfn; fn != nil {
  1038  		fn()
  1039  	}
  1040  
  1041  	if _g_.m.helpgc != 0 {
  1042  		_g_.m.helpgc = 0
  1043  		stopm()
  1044  	} else if _g_.m != &m0 {
  1045  		acquirep(_g_.m.nextp.ptr())
  1046  		_g_.m.nextp = 0
  1047  	}
  1048  	schedule()
  1049  }
  1050  
  1051  // forEachP calls fn(p) for every P p when p reaches a GC safe point.
  1052  // If a P is currently executing code, this will bring the P to a GC
  1053  // safe point and execute fn on that P. If the P is not executing code
  1054  // (it is idle or in a syscall), this will call fn(p) directly while
  1055  // preventing the P from exiting its state. This does not ensure that
  1056  // fn will run on every CPU executing Go code, but it acts as a global
  1057  // memory barrier. GC uses this as a "ragged barrier."
  1058  //
  1059  // The caller must hold worldsema.
  1060  func forEachP(fn func(*p)) {
  1061  	mp := acquirem()
  1062  	_p_ := getg().m.p.ptr()
  1063  
  1064  	lock(&sched.lock)
  1065  	if sched.safePointWait != 0 {
  1066  		throw("forEachP: sched.safePointWait != 0")
  1067  	}
  1068  	sched.safePointWait = gomaxprocs - 1
  1069  	sched.safePointFn = fn
  1070  
  1071  	// Ask all Ps to run the safe point function.
  1072  	for _, p := range allp[:gomaxprocs] {
  1073  		if p != _p_ {
  1074  			atomicstore(&p.runSafePointFn, 1)
  1075  		}
  1076  	}
  1077  	preemptall()
  1078  
  1079  	// Any P entering _Pidle or _Psyscall from now on will observe
  1080  	// p.runSafePointFn == 1 and will call runSafePointFn when
  1081  	// changing its status to _Pidle/_Psyscall.
  1082  
  1083  	// Run safe point function for all idle Ps. sched.pidle will
  1084  	// not change because we hold sched.lock.
  1085  	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
  1086  		if cas(&p.runSafePointFn, 1, 0) {
  1087  			fn(p)
  1088  			sched.safePointWait--
  1089  		}
  1090  	}
  1091  
  1092  	wait := sched.safePointWait > 0
  1093  	unlock(&sched.lock)
  1094  
  1095  	// Run fn for the current P.
  1096  	fn(_p_)
  1097  
  1098  	// Force Ps currently in _Psyscall into _Pidle and hand them
  1099  	// off to induce safe point function execution.
  1100  	for i := 0; i < int(gomaxprocs); i++ {
  1101  		p := allp[i]
  1102  		s := p.status
  1103  		if s == _Psyscall && p.runSafePointFn == 1 && cas(&p.status, s, _Pidle) {
  1104  			if trace.enabled {
  1105  				traceGoSysBlock(p)
  1106  				traceProcStop(p)
  1107  			}
  1108  			p.syscalltick++
  1109  			handoffp(p)
  1110  		}
  1111  	}
  1112  
  1113  	// Wait for remaining Ps to run fn.
  1114  	if wait {
  1115  		for {
  1116  			// Wait for 100us, then try to re-preempt in
  1117  			// case of any races.
  1118  			if notetsleep(&sched.safePointNote, 100*1000) {
  1119  				noteclear(&sched.safePointNote)
  1120  				break
  1121  			}
  1122  			preemptall()
  1123  		}
  1124  	}
  1125  	if sched.safePointWait != 0 {
  1126  		throw("forEachP: not done")
  1127  	}
  1128  	for i := 0; i < int(gomaxprocs); i++ {
  1129  		p := allp[i]
  1130  		if p.runSafePointFn != 0 {
  1131  			throw("forEachP: P did not run fn")
  1132  		}
  1133  	}
  1134  
  1135  	lock(&sched.lock)
  1136  	sched.safePointFn = nil
  1137  	unlock(&sched.lock)
  1138  	releasem(mp)
  1139  }
  1140  
  1141  // runSafePointFn runs the safe point function, if any, for this P.
  1142  // This should be called like
  1143  //
  1144  //     if getg().m.p.runSafePointFn != 0 {
  1145  //         runSafePointFn()
  1146  //     }
  1147  //
  1148  // runSafePointFn must be checked on any transition in to _Pidle or
  1149  // _Psyscall to avoid a race where forEachP sees that the P is running
  1150  // just before the P goes into _Pidle/_Psyscall and neither forEachP
  1151  // nor the P run the safe-point function.
  1152  func runSafePointFn() {
  1153  	p := getg().m.p.ptr()
  1154  	// Resolve the race between forEachP running the safe-point
  1155  	// function on this P's behalf and this P running the
  1156  	// safe-point function directly.
  1157  	if !cas(&p.runSafePointFn, 1, 0) {
  1158  		return
  1159  	}
  1160  	sched.safePointFn(p)
  1161  	lock(&sched.lock)
  1162  	sched.safePointWait--
  1163  	if sched.safePointWait == 0 {
  1164  		notewakeup(&sched.safePointNote)
  1165  	}
  1166  	unlock(&sched.lock)
  1167  }
  1168  
  1169  // When running with cgo, we call _cgo_thread_start
  1170  // to start threads for us so that we can play nicely with
  1171  // foreign code.
  1172  var cgoThreadStart unsafe.Pointer
  1173  
  1174  type cgothreadstart struct {
  1175  	g   guintptr
  1176  	tls *uint64
  1177  	fn  unsafe.Pointer
  1178  }
  1179  
  1180  // Allocate a new m unassociated with any thread.
  1181  // Can use p for allocation context if needed.
  1182  // fn is recorded as the new m's m.mstartfn.
  1183  func allocm(_p_ *p, fn func()) *m {
  1184  	_g_ := getg()
  1185  	_g_.m.locks++ // disable GC because it can be called from sysmon
  1186  	if _g_.m.p == 0 {
  1187  		acquirep(_p_) // temporarily borrow p for mallocs in this function
  1188  	}
  1189  	mp := new(m)
  1190  	mp.mstartfn = fn
  1191  	mcommoninit(mp)
  1192  
  1193  	// In case of cgo or Solaris, pthread_create will make us a stack.
  1194  	// Windows and Plan 9 will layout sched stack on OS stack.
  1195  	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
  1196  		mp.g0 = malg(-1)
  1197  	} else {
  1198  		mp.g0 = malg(8192 * stackGuardMultiplier)
  1199  	}
  1200  	mp.g0.m = mp
  1201  
  1202  	if _p_ == _g_.m.p.ptr() {
  1203  		releasep()
  1204  	}
  1205  	_g_.m.locks--
  1206  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  1207  		_g_.stackguard0 = stackPreempt
  1208  	}
  1209  
  1210  	return mp
  1211  }
  1212  
  1213  // needm is called when a cgo callback happens on a
  1214  // thread without an m (a thread not created by Go).
  1215  // In this case, needm is expected to find an m to use
  1216  // and return with m, g initialized correctly.
  1217  // Since m and g are not set now (likely nil, but see below)
  1218  // needm is limited in what routines it can call. In particular
  1219  // it can only call nosplit functions (textflag 7) and cannot
  1220  // do any scheduling that requires an m.
  1221  //
  1222  // In order to avoid needing heavy lifting here, we adopt
  1223  // the following strategy: there is a stack of available m's
  1224  // that can be stolen. Using compare-and-swap
  1225  // to pop from the stack has ABA races, so we simulate
  1226  // a lock by doing an exchange (via casp) to steal the stack
  1227  // head and replace the top pointer with MLOCKED (1).
  1228  // This serves as a simple spin lock that we can use even
  1229  // without an m. The thread that locks the stack in this way
  1230  // unlocks the stack by storing a valid stack head pointer.
  1231  //
  1232  // In order to make sure that there is always an m structure
  1233  // available to be stolen, we maintain the invariant that there
  1234  // is always one more than needed. At the beginning of the
  1235  // program (if cgo is in use) the list is seeded with a single m.
  1236  // If needm finds that it has taken the last m off the list, its job
  1237  // is - once it has installed its own m so that it can do things like
  1238  // allocate memory - to create a spare m and put it on the list.
  1239  //
  1240  // Each of these extra m's also has a g0 and a curg that are
  1241  // pressed into service as the scheduling stack and current
  1242  // goroutine for the duration of the cgo callback.
  1243  //
  1244  // When the callback is done with the m, it calls dropm to
  1245  // put the m back on the list.
  1246  //go:nosplit
  1247  func needm(x byte) {
  1248  	if iscgo && !cgoHasExtraM {
  1249  		// Can happen if C/C++ code calls Go from a global ctor.
  1250  		// Can not throw, because scheduler is not initialized yet.
  1251  		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
  1252  		exit(1)
  1253  	}
  1254  
  1255  	// Lock extra list, take head, unlock popped list.
  1256  	// nilokay=false is safe here because of the invariant above,
  1257  	// that the extra list always contains or will soon contain
  1258  	// at least one m.
  1259  	mp := lockextra(false)
  1260  
  1261  	// Set needextram when we've just emptied the list,
  1262  	// so that the eventual call into cgocallbackg will
  1263  	// allocate a new m for the extra list. We delay the
  1264  	// allocation until then so that it can be done
  1265  	// after exitsyscall makes sure it is okay to be
  1266  	// running at all (that is, there's no garbage collection
  1267  	// running right now).
  1268  	mp.needextram = mp.schedlink == 0
  1269  	unlockextra(mp.schedlink.ptr())
  1270  
  1271  	// Install g (= m->g0) and set the stack bounds
  1272  	// to match the current stack. We don't actually know
  1273  	// how big the stack is, like we don't know how big any
  1274  	// scheduling stack is, but we assume there's at least 32 kB,
  1275  	// which is more than enough for us.
  1276  	setg(mp.g0)
  1277  	_g_ := getg()
  1278  	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
  1279  	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
  1280  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1281  
  1282  	msigsave(mp)
  1283  	// Initialize this thread to use the m.
  1284  	asminit()
  1285  	minit()
  1286  }
  1287  
  1288  var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
  1289  
  1290  // newextram allocates an m and puts it on the extra list.
  1291  // It is called with a working local m, so that it can do things
  1292  // like call schedlock and allocate.
  1293  func newextram() {
  1294  	// Create extra goroutine locked to extra m.
  1295  	// The goroutine is the context in which the cgo callback will run.
  1296  	// The sched.pc will never be returned to, but setting it to
  1297  	// goexit makes clear to the traceback routines where
  1298  	// the goroutine stack ends.
  1299  	mp := allocm(nil, nil)
  1300  	gp := malg(4096)
  1301  	gp.sched.pc = funcPC(goexit) + _PCQuantum
  1302  	gp.sched.sp = gp.stack.hi
  1303  	gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
  1304  	gp.sched.lr = 0
  1305  	gp.sched.g = guintptr(unsafe.Pointer(gp))
  1306  	gp.syscallpc = gp.sched.pc
  1307  	gp.syscallsp = gp.sched.sp
  1308  	gp.stktopsp = gp.sched.sp
  1309  	// malg returns status as Gidle, change to Gsyscall before adding to allg
  1310  	// where GC will see it.
  1311  	casgstatus(gp, _Gidle, _Gsyscall)
  1312  	gp.m = mp
  1313  	mp.curg = gp
  1314  	mp.locked = _LockInternal
  1315  	mp.lockedg = gp
  1316  	gp.lockedm = mp
  1317  	gp.goid = int64(xadd64(&sched.goidgen, 1))
  1318  	if raceenabled {
  1319  		gp.racectx = racegostart(funcPC(newextram))
  1320  	}
  1321  	// put on allg for garbage collector
  1322  	allgadd(gp)
  1323  
  1324  	// Add m to the extra list.
  1325  	mnext := lockextra(true)
  1326  	mp.schedlink.set(mnext)
  1327  	unlockextra(mp)
  1328  }
  1329  
  1330  // dropm is called when a cgo callback has called needm but is now
  1331  // done with the callback and returning back into the non-Go thread.
  1332  // It puts the current m back onto the extra list.
  1333  //
  1334  // The main expense here is the call to signalstack to release the
  1335  // m's signal stack, and then the call to needm on the next callback
  1336  // from this thread. It is tempting to try to save the m for next time,
  1337  // which would eliminate both these costs, but there might not be
  1338  // a next time: the current thread (which Go does not control) might exit.
  1339  // If we saved the m for that thread, there would be an m leak each time
  1340  // such a thread exited. Instead, we acquire and release an m on each
  1341  // call. These should typically not be scheduling operations, just a few
  1342  // atomics, so the cost should be small.
  1343  //
  1344  // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
  1345  // variable using pthread_key_create. Unlike the pthread keys we already use
  1346  // on OS X, this dummy key would never be read by Go code. It would exist
  1347  // only so that we could register at thread-exit-time destructor.
  1348  // That destructor would put the m back onto the extra list.
  1349  // This is purely a performance optimization. The current version,
  1350  // in which dropm happens on each cgo call, is still correct too.
  1351  // We may have to keep the current version on systems with cgo
  1352  // but without pthreads, like Windows.
  1353  func dropm() {
  1354  	// Undo whatever initialization minit did during needm.
  1355  	unminit()
  1356  
  1357  	// Clear m and g, and return m to the extra list.
  1358  	// After the call to setg we can only call nosplit functions
  1359  	// with no pointer manipulation.
  1360  	mp := getg().m
  1361  	mnext := lockextra(true)
  1362  	mp.schedlink.set(mnext)
  1363  
  1364  	setg(nil)
  1365  	unlockextra(mp)
  1366  }
  1367  
  1368  var extram uintptr
  1369  
  1370  // lockextra locks the extra list and returns the list head.
  1371  // The caller must unlock the list by storing a new list head
  1372  // to extram. If nilokay is true, then lockextra will
  1373  // return a nil list head if that's what it finds. If nilokay is false,
  1374  // lockextra will keep waiting until the list head is no longer nil.
  1375  //go:nosplit
  1376  func lockextra(nilokay bool) *m {
  1377  	const locked = 1
  1378  
  1379  	for {
  1380  		old := atomicloaduintptr(&extram)
  1381  		if old == locked {
  1382  			yield := osyield
  1383  			yield()
  1384  			continue
  1385  		}
  1386  		if old == 0 && !nilokay {
  1387  			usleep(1)
  1388  			continue
  1389  		}
  1390  		if casuintptr(&extram, old, locked) {
  1391  			return (*m)(unsafe.Pointer(old))
  1392  		}
  1393  		yield := osyield
  1394  		yield()
  1395  		continue
  1396  	}
  1397  }
  1398  
  1399  //go:nosplit
  1400  func unlockextra(mp *m) {
  1401  	atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
  1402  }
  1403  
  1404  // Create a new m.  It will start off with a call to fn, or else the scheduler.
  1405  // fn needs to be static and not a heap allocated closure.
  1406  // May run with m.p==nil, so write barriers are not allowed.
  1407  //go:nowritebarrier
  1408  func newm(fn func(), _p_ *p) {
  1409  	mp := allocm(_p_, fn)
  1410  	mp.nextp.set(_p_)
  1411  	msigsave(mp)
  1412  	if iscgo {
  1413  		var ts cgothreadstart
  1414  		if _cgo_thread_start == nil {
  1415  			throw("_cgo_thread_start missing")
  1416  		}
  1417  		ts.g.set(mp.g0)
  1418  		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
  1419  		ts.fn = unsafe.Pointer(funcPC(mstart))
  1420  		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
  1421  		return
  1422  	}
  1423  	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
  1424  }
  1425  
  1426  // Stops execution of the current m until new work is available.
  1427  // Returns with acquired P.
  1428  func stopm() {
  1429  	_g_ := getg()
  1430  
  1431  	if _g_.m.locks != 0 {
  1432  		throw("stopm holding locks")
  1433  	}
  1434  	if _g_.m.p != 0 {
  1435  		throw("stopm holding p")
  1436  	}
  1437  	if _g_.m.spinning {
  1438  		_g_.m.spinning = false
  1439  		xadd(&sched.nmspinning, -1)
  1440  	}
  1441  
  1442  retry:
  1443  	lock(&sched.lock)
  1444  	mput(_g_.m)
  1445  	unlock(&sched.lock)
  1446  	notesleep(&_g_.m.park)
  1447  	noteclear(&_g_.m.park)
  1448  	if _g_.m.helpgc != 0 {
  1449  		gchelper()
  1450  		_g_.m.helpgc = 0
  1451  		_g_.m.mcache = nil
  1452  		_g_.m.p = 0
  1453  		goto retry
  1454  	}
  1455  	acquirep(_g_.m.nextp.ptr())
  1456  	_g_.m.nextp = 0
  1457  }
  1458  
  1459  func mspinning() {
  1460  	gp := getg()
  1461  	if !runqempty(gp.m.nextp.ptr()) {
  1462  		// Something (presumably the GC) was readied while the
  1463  		// runtime was starting up this M, so the M is no
  1464  		// longer spinning.
  1465  		if int32(xadd(&sched.nmspinning, -1)) < 0 {
  1466  			throw("mspinning: nmspinning underflowed")
  1467  		}
  1468  	} else {
  1469  		gp.m.spinning = true
  1470  	}
  1471  }
  1472  
  1473  // Schedules some M to run the p (creates an M if necessary).
  1474  // If p==nil, tries to get an idle P, if no idle P's does nothing.
  1475  // May run with m.p==nil, so write barriers are not allowed.
  1476  //go:nowritebarrier
  1477  func startm(_p_ *p, spinning bool) {
  1478  	lock(&sched.lock)
  1479  	if _p_ == nil {
  1480  		_p_ = pidleget()
  1481  		if _p_ == nil {
  1482  			unlock(&sched.lock)
  1483  			if spinning {
  1484  				xadd(&sched.nmspinning, -1)
  1485  			}
  1486  			return
  1487  		}
  1488  	}
  1489  	mp := mget()
  1490  	unlock(&sched.lock)
  1491  	if mp == nil {
  1492  		var fn func()
  1493  		if spinning {
  1494  			fn = mspinning
  1495  		}
  1496  		newm(fn, _p_)
  1497  		return
  1498  	}
  1499  	if mp.spinning {
  1500  		throw("startm: m is spinning")
  1501  	}
  1502  	if mp.nextp != 0 {
  1503  		throw("startm: m has p")
  1504  	}
  1505  	if spinning && !runqempty(_p_) {
  1506  		throw("startm: p has runnable gs")
  1507  	}
  1508  	mp.spinning = spinning
  1509  	mp.nextp.set(_p_)
  1510  	notewakeup(&mp.park)
  1511  }
  1512  
  1513  // Hands off P from syscall or locked M.
  1514  // Always runs without a P, so write barriers are not allowed.
  1515  //go:nowritebarrier
  1516  func handoffp(_p_ *p) {
  1517  	// if it has local work, start it straight away
  1518  	if !runqempty(_p_) || sched.runqsize != 0 {
  1519  		startm(_p_, false)
  1520  		return
  1521  	}
  1522  	// no local work, check that there are no spinning/idle M's,
  1523  	// otherwise our help is not required
  1524  	if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
  1525  		startm(_p_, true)
  1526  		return
  1527  	}
  1528  	lock(&sched.lock)
  1529  	if sched.gcwaiting != 0 {
  1530  		_p_.status = _Pgcstop
  1531  		sched.stopwait--
  1532  		if sched.stopwait == 0 {
  1533  			notewakeup(&sched.stopnote)
  1534  		}
  1535  		unlock(&sched.lock)
  1536  		return
  1537  	}
  1538  	if _p_.runSafePointFn != 0 && cas(&_p_.runSafePointFn, 1, 0) {
  1539  		sched.safePointFn(_p_)
  1540  		sched.safePointWait--
  1541  		if sched.safePointWait == 0 {
  1542  			notewakeup(&sched.safePointNote)
  1543  		}
  1544  	}
  1545  	if sched.runqsize != 0 {
  1546  		unlock(&sched.lock)
  1547  		startm(_p_, false)
  1548  		return
  1549  	}
  1550  	// If this is the last running P and nobody is polling network,
  1551  	// need to wakeup another M to poll network.
  1552  	if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
  1553  		unlock(&sched.lock)
  1554  		startm(_p_, false)
  1555  		return
  1556  	}
  1557  	pidleput(_p_)
  1558  	unlock(&sched.lock)
  1559  }
  1560  
  1561  // Tries to add one more P to execute G's.
  1562  // Called when a G is made runnable (newproc, ready).
  1563  func wakep() {
  1564  	// be conservative about spinning threads
  1565  	if !cas(&sched.nmspinning, 0, 1) {
  1566  		return
  1567  	}
  1568  	startm(nil, true)
  1569  }
  1570  
  1571  // Stops execution of the current m that is locked to a g until the g is runnable again.
  1572  // Returns with acquired P.
  1573  func stoplockedm() {
  1574  	_g_ := getg()
  1575  
  1576  	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
  1577  		throw("stoplockedm: inconsistent locking")
  1578  	}
  1579  	if _g_.m.p != 0 {
  1580  		// Schedule another M to run this p.
  1581  		_p_ := releasep()
  1582  		handoffp(_p_)
  1583  	}
  1584  	incidlelocked(1)
  1585  	// Wait until another thread schedules lockedg again.
  1586  	notesleep(&_g_.m.park)
  1587  	noteclear(&_g_.m.park)
  1588  	status := readgstatus(_g_.m.lockedg)
  1589  	if status&^_Gscan != _Grunnable {
  1590  		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
  1591  		dumpgstatus(_g_)
  1592  		throw("stoplockedm: not runnable")
  1593  	}
  1594  	acquirep(_g_.m.nextp.ptr())
  1595  	_g_.m.nextp = 0
  1596  }
  1597  
  1598  // Schedules the locked m to run the locked gp.
  1599  // May run during STW, so write barriers are not allowed.
  1600  //go:nowritebarrier
  1601  func startlockedm(gp *g) {
  1602  	_g_ := getg()
  1603  
  1604  	mp := gp.lockedm
  1605  	if mp == _g_.m {
  1606  		throw("startlockedm: locked to me")
  1607  	}
  1608  	if mp.nextp != 0 {
  1609  		throw("startlockedm: m has p")
  1610  	}
  1611  	// directly handoff current P to the locked m
  1612  	incidlelocked(-1)
  1613  	_p_ := releasep()
  1614  	mp.nextp.set(_p_)
  1615  	notewakeup(&mp.park)
  1616  	stopm()
  1617  }
  1618  
  1619  // Stops the current m for stopTheWorld.
  1620  // Returns when the world is restarted.
  1621  func gcstopm() {
  1622  	_g_ := getg()
  1623  
  1624  	if sched.gcwaiting == 0 {
  1625  		throw("gcstopm: not waiting for gc")
  1626  	}
  1627  	if _g_.m.spinning {
  1628  		_g_.m.spinning = false
  1629  		xadd(&sched.nmspinning, -1)
  1630  	}
  1631  	_p_ := releasep()
  1632  	lock(&sched.lock)
  1633  	_p_.status = _Pgcstop
  1634  	sched.stopwait--
  1635  	if sched.stopwait == 0 {
  1636  		notewakeup(&sched.stopnote)
  1637  	}
  1638  	unlock(&sched.lock)
  1639  	stopm()
  1640  }
  1641  
  1642  // Schedules gp to run on the current M.
  1643  // If inheritTime is true, gp inherits the remaining time in the
  1644  // current time slice. Otherwise, it starts a new time slice.
  1645  // Never returns.
  1646  func execute(gp *g, inheritTime bool) {
  1647  	_g_ := getg()
  1648  
  1649  	casgstatus(gp, _Grunnable, _Grunning)
  1650  	gp.waitsince = 0
  1651  	gp.preempt = false
  1652  	gp.stackguard0 = gp.stack.lo + _StackGuard
  1653  	if !inheritTime {
  1654  		_g_.m.p.ptr().schedtick++
  1655  	}
  1656  	_g_.m.curg = gp
  1657  	gp.m = _g_.m
  1658  
  1659  	// Check whether the profiler needs to be turned on or off.
  1660  	hz := sched.profilehz
  1661  	if _g_.m.profilehz != hz {
  1662  		resetcpuprofiler(hz)
  1663  	}
  1664  
  1665  	if trace.enabled {
  1666  		// GoSysExit has to happen when we have a P, but before GoStart.
  1667  		// So we emit it here.
  1668  		if gp.syscallsp != 0 && gp.sysblocktraced {
  1669  			// Since gp.sysblocktraced is true, we must emit an event.
  1670  			// There is a race between the code that initializes sysexitseq
  1671  			// and sysexitticks (in exitsyscall, which runs without a P,
  1672  			// and therefore is not stopped with the rest of the world)
  1673  			// and the code that initializes a new trace.
  1674  			// The recorded sysexitseq and sysexitticks must therefore
  1675  			// be treated as "best effort". If they are valid for this trace,
  1676  			// then great, use them for greater accuracy.
  1677  			// But if they're not valid for this trace, assume that the
  1678  			// trace was started after the actual syscall exit (but before
  1679  			// we actually managed to start the goroutine, aka right now),
  1680  			// and assign a fresh time stamp to keep the log consistent.
  1681  			seq, ts := gp.sysexitseq, gp.sysexitticks
  1682  			if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 {
  1683  				seq, ts = tracestamp()
  1684  			}
  1685  			traceGoSysExit(seq, ts)
  1686  		}
  1687  		traceGoStart()
  1688  	}
  1689  
  1690  	gogo(&gp.sched)
  1691  }
  1692  
  1693  // Finds a runnable goroutine to execute.
  1694  // Tries to steal from other P's, get g from global queue, poll network.
  1695  func findrunnable() (gp *g, inheritTime bool) {
  1696  	_g_ := getg()
  1697  
  1698  top:
  1699  	if sched.gcwaiting != 0 {
  1700  		gcstopm()
  1701  		goto top
  1702  	}
  1703  	if _g_.m.p.ptr().runSafePointFn != 0 {
  1704  		runSafePointFn()
  1705  	}
  1706  	if fingwait && fingwake {
  1707  		if gp := wakefing(); gp != nil {
  1708  			ready(gp, 0)
  1709  		}
  1710  	}
  1711  
  1712  	// local runq
  1713  	if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil {
  1714  		return gp, inheritTime
  1715  	}
  1716  
  1717  	// global runq
  1718  	if sched.runqsize != 0 {
  1719  		lock(&sched.lock)
  1720  		gp := globrunqget(_g_.m.p.ptr(), 0)
  1721  		unlock(&sched.lock)
  1722  		if gp != nil {
  1723  			return gp, false
  1724  		}
  1725  	}
  1726  
  1727  	// Poll network.
  1728  	// This netpoll is only an optimization before we resort to stealing.
  1729  	// We can safely skip it if there a thread blocked in netpoll already.
  1730  	// If there is any kind of logical race with that blocked thread
  1731  	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
  1732  	// this thread will do blocking netpoll below anyway.
  1733  	if netpollinited() && sched.lastpoll != 0 {
  1734  		if gp := netpoll(false); gp != nil { // non-blocking
  1735  			// netpoll returns list of goroutines linked by schedlink.
  1736  			injectglist(gp.schedlink.ptr())
  1737  			casgstatus(gp, _Gwaiting, _Grunnable)
  1738  			if trace.enabled {
  1739  				traceGoUnpark(gp, 0)
  1740  			}
  1741  			return gp, false
  1742  		}
  1743  	}
  1744  
  1745  	// If number of spinning M's >= number of busy P's, block.
  1746  	// This is necessary to prevent excessive CPU consumption
  1747  	// when GOMAXPROCS>>1 but the program parallelism is low.
  1748  	if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
  1749  		goto stop
  1750  	}
  1751  	if !_g_.m.spinning {
  1752  		_g_.m.spinning = true
  1753  		xadd(&sched.nmspinning, 1)
  1754  	}
  1755  	// random steal from other P's
  1756  	for i := 0; i < int(4*gomaxprocs); i++ {
  1757  		if sched.gcwaiting != 0 {
  1758  			goto top
  1759  		}
  1760  		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
  1761  		var gp *g
  1762  		if _p_ == _g_.m.p.ptr() {
  1763  			gp, _ = runqget(_p_)
  1764  		} else {
  1765  			stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g
  1766  			gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG)
  1767  		}
  1768  		if gp != nil {
  1769  			return gp, false
  1770  		}
  1771  	}
  1772  
  1773  stop:
  1774  
  1775  	// We have nothing to do. If we're in the GC mark phase and can
  1776  	// safely scan and blacken objects, run idle-time marking
  1777  	// rather than give up the P.
  1778  	if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != nil && gcMarkWorkAvailable(_p_) {
  1779  		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
  1780  		gp := _p_.gcBgMarkWorker
  1781  		casgstatus(gp, _Gwaiting, _Grunnable)
  1782  		if trace.enabled {
  1783  			traceGoUnpark(gp, 0)
  1784  		}
  1785  		return gp, false
  1786  	}
  1787  
  1788  	// return P and block
  1789  	lock(&sched.lock)
  1790  	if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 {
  1791  		unlock(&sched.lock)
  1792  		goto top
  1793  	}
  1794  	if sched.runqsize != 0 {
  1795  		gp := globrunqget(_g_.m.p.ptr(), 0)
  1796  		unlock(&sched.lock)
  1797  		return gp, false
  1798  	}
  1799  	_p_ := releasep()
  1800  	pidleput(_p_)
  1801  	unlock(&sched.lock)
  1802  	if _g_.m.spinning {
  1803  		_g_.m.spinning = false
  1804  		xadd(&sched.nmspinning, -1)
  1805  	}
  1806  
  1807  	// check all runqueues once again
  1808  	for i := 0; i < int(gomaxprocs); i++ {
  1809  		_p_ := allp[i]
  1810  		if _p_ != nil && !runqempty(_p_) {
  1811  			lock(&sched.lock)
  1812  			_p_ = pidleget()
  1813  			unlock(&sched.lock)
  1814  			if _p_ != nil {
  1815  				acquirep(_p_)
  1816  				goto top
  1817  			}
  1818  			break
  1819  		}
  1820  	}
  1821  
  1822  	// poll network
  1823  	if netpollinited() && xchg64(&sched.lastpoll, 0) != 0 {
  1824  		if _g_.m.p != 0 {
  1825  			throw("findrunnable: netpoll with p")
  1826  		}
  1827  		if _g_.m.spinning {
  1828  			throw("findrunnable: netpoll with spinning")
  1829  		}
  1830  		gp := netpoll(true) // block until new work is available
  1831  		atomicstore64(&sched.lastpoll, uint64(nanotime()))
  1832  		if gp != nil {
  1833  			lock(&sched.lock)
  1834  			_p_ = pidleget()
  1835  			unlock(&sched.lock)
  1836  			if _p_ != nil {
  1837  				acquirep(_p_)
  1838  				injectglist(gp.schedlink.ptr())
  1839  				casgstatus(gp, _Gwaiting, _Grunnable)
  1840  				if trace.enabled {
  1841  					traceGoUnpark(gp, 0)
  1842  				}
  1843  				return gp, false
  1844  			}
  1845  			injectglist(gp)
  1846  		}
  1847  	}
  1848  	stopm()
  1849  	goto top
  1850  }
  1851  
  1852  func resetspinning() {
  1853  	_g_ := getg()
  1854  
  1855  	var nmspinning uint32
  1856  	if _g_.m.spinning {
  1857  		_g_.m.spinning = false
  1858  		nmspinning = xadd(&sched.nmspinning, -1)
  1859  		if int32(nmspinning) < 0 {
  1860  			throw("findrunnable: negative nmspinning")
  1861  		}
  1862  	} else {
  1863  		nmspinning = atomicload(&sched.nmspinning)
  1864  	}
  1865  
  1866  	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
  1867  	// so see if we need to wakeup another P here.
  1868  	if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
  1869  		wakep()
  1870  	}
  1871  }
  1872  
  1873  // Injects the list of runnable G's into the scheduler.
  1874  // Can run concurrently with GC.
  1875  func injectglist(glist *g) {
  1876  	if glist == nil {
  1877  		return
  1878  	}
  1879  	if trace.enabled {
  1880  		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
  1881  			traceGoUnpark(gp, 0)
  1882  		}
  1883  	}
  1884  	lock(&sched.lock)
  1885  	var n int
  1886  	for n = 0; glist != nil; n++ {
  1887  		gp := glist
  1888  		glist = gp.schedlink.ptr()
  1889  		casgstatus(gp, _Gwaiting, _Grunnable)
  1890  		globrunqput(gp)
  1891  	}
  1892  	unlock(&sched.lock)
  1893  	for ; n != 0 && sched.npidle != 0; n-- {
  1894  		startm(nil, false)
  1895  	}
  1896  }
  1897  
  1898  // One round of scheduler: find a runnable goroutine and execute it.
  1899  // Never returns.
  1900  func schedule() {
  1901  	_g_ := getg()
  1902  
  1903  	if _g_.m.locks != 0 {
  1904  		throw("schedule: holding locks")
  1905  	}
  1906  
  1907  	if _g_.m.lockedg != nil {
  1908  		stoplockedm()
  1909  		execute(_g_.m.lockedg, false) // Never returns.
  1910  	}
  1911  
  1912  top:
  1913  	if sched.gcwaiting != 0 {
  1914  		gcstopm()
  1915  		goto top
  1916  	}
  1917  	if _g_.m.p.ptr().runSafePointFn != 0 {
  1918  		runSafePointFn()
  1919  	}
  1920  
  1921  	var gp *g
  1922  	var inheritTime bool
  1923  	if trace.enabled || trace.shutdown {
  1924  		gp = traceReader()
  1925  		if gp != nil {
  1926  			casgstatus(gp, _Gwaiting, _Grunnable)
  1927  			traceGoUnpark(gp, 0)
  1928  			resetspinning()
  1929  		}
  1930  	}
  1931  	if gp == nil && gcBlackenEnabled != 0 {
  1932  		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
  1933  		if gp != nil {
  1934  			resetspinning()
  1935  		}
  1936  	}
  1937  	if gp == nil {
  1938  		// Check the global runnable queue once in a while to ensure fairness.
  1939  		// Otherwise two goroutines can completely occupy the local runqueue
  1940  		// by constantly respawning each other.
  1941  		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
  1942  			lock(&sched.lock)
  1943  			gp = globrunqget(_g_.m.p.ptr(), 1)
  1944  			unlock(&sched.lock)
  1945  			if gp != nil {
  1946  				resetspinning()
  1947  			}
  1948  		}
  1949  	}
  1950  	if gp == nil {
  1951  		gp, inheritTime = runqget(_g_.m.p.ptr())
  1952  		if gp != nil && _g_.m.spinning {
  1953  			throw("schedule: spinning with local work")
  1954  		}
  1955  	}
  1956  	if gp == nil {
  1957  		gp, inheritTime = findrunnable() // blocks until work is available
  1958  		resetspinning()
  1959  	}
  1960  
  1961  	if gp.lockedm != nil {
  1962  		// Hands off own p to the locked m,
  1963  		// then blocks waiting for a new p.
  1964  		startlockedm(gp)
  1965  		goto top
  1966  	}
  1967  
  1968  	execute(gp, inheritTime)
  1969  }
  1970  
  1971  // dropg removes the association between m and the current goroutine m->curg (gp for short).
  1972  // Typically a caller sets gp's status away from Grunning and then
  1973  // immediately calls dropg to finish the job. The caller is also responsible
  1974  // for arranging that gp will be restarted using ready at an
  1975  // appropriate time. After calling dropg and arranging for gp to be
  1976  // readied later, the caller can do other work but eventually should
  1977  // call schedule to restart the scheduling of goroutines on this m.
  1978  func dropg() {
  1979  	_g_ := getg()
  1980  
  1981  	if _g_.m.lockedg == nil {
  1982  		_g_.m.curg.m = nil
  1983  		_g_.m.curg = nil
  1984  	}
  1985  }
  1986  
  1987  func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  1988  	unlock((*mutex)(lock))
  1989  	return true
  1990  }
  1991  
  1992  // park continuation on g0.
  1993  func park_m(gp *g) {
  1994  	_g_ := getg()
  1995  
  1996  	if trace.enabled {
  1997  		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
  1998  	}
  1999  
  2000  	casgstatus(gp, _Grunning, _Gwaiting)
  2001  	dropg()
  2002  
  2003  	if _g_.m.waitunlockf != nil {
  2004  		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
  2005  		ok := fn(gp, _g_.m.waitlock)
  2006  		_g_.m.waitunlockf = nil
  2007  		_g_.m.waitlock = nil
  2008  		if !ok {
  2009  			if trace.enabled {
  2010  				traceGoUnpark(gp, 2)
  2011  			}
  2012  			casgstatus(gp, _Gwaiting, _Grunnable)
  2013  			execute(gp, true) // Schedule it back, never returns.
  2014  		}
  2015  	}
  2016  	schedule()
  2017  }
  2018  
  2019  func goschedImpl(gp *g) {
  2020  	status := readgstatus(gp)
  2021  	if status&^_Gscan != _Grunning {
  2022  		dumpgstatus(gp)
  2023  		throw("bad g status")
  2024  	}
  2025  	casgstatus(gp, _Grunning, _Grunnable)
  2026  	dropg()
  2027  	lock(&sched.lock)
  2028  	globrunqput(gp)
  2029  	unlock(&sched.lock)
  2030  
  2031  	schedule()
  2032  }
  2033  
  2034  // Gosched continuation on g0.
  2035  func gosched_m(gp *g) {
  2036  	if trace.enabled {
  2037  		traceGoSched()
  2038  	}
  2039  	goschedImpl(gp)
  2040  }
  2041  
  2042  func gopreempt_m(gp *g) {
  2043  	if trace.enabled {
  2044  		traceGoPreempt()
  2045  	}
  2046  	goschedImpl(gp)
  2047  }
  2048  
  2049  // Finishes execution of the current goroutine.
  2050  func goexit1() {
  2051  	if raceenabled {
  2052  		racegoend()
  2053  	}
  2054  	if trace.enabled {
  2055  		traceGoEnd()
  2056  	}
  2057  	mcall(goexit0)
  2058  }
  2059  
  2060  // goexit continuation on g0.
  2061  func goexit0(gp *g) {
  2062  	_g_ := getg()
  2063  
  2064  	casgstatus(gp, _Grunning, _Gdead)
  2065  	gp.m = nil
  2066  	gp.lockedm = nil
  2067  	_g_.m.lockedg = nil
  2068  	gp.paniconfault = false
  2069  	gp._defer = nil // should be true already but just in case.
  2070  	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
  2071  	gp.writebuf = nil
  2072  	gp.waitreason = ""
  2073  	gp.param = nil
  2074  
  2075  	dropg()
  2076  
  2077  	if _g_.m.locked&^_LockExternal != 0 {
  2078  		print("invalid m->locked = ", _g_.m.locked, "\n")
  2079  		throw("internal lockOSThread error")
  2080  	}
  2081  	_g_.m.locked = 0
  2082  	gfput(_g_.m.p.ptr(), gp)
  2083  	schedule()
  2084  }
  2085  
  2086  //go:nosplit
  2087  //go:nowritebarrier
  2088  func save(pc, sp uintptr) {
  2089  	_g_ := getg()
  2090  
  2091  	_g_.sched.pc = pc
  2092  	_g_.sched.sp = sp
  2093  	_g_.sched.lr = 0
  2094  	_g_.sched.ret = 0
  2095  	_g_.sched.ctxt = nil
  2096  	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
  2097  }
  2098  
  2099  // The goroutine g is about to enter a system call.
  2100  // Record that it's not using the cpu anymore.
  2101  // This is called only from the go syscall library and cgocall,
  2102  // not from the low-level system calls used by the runtime.
  2103  //
  2104  // Entersyscall cannot split the stack: the gosave must
  2105  // make g->sched refer to the caller's stack segment, because
  2106  // entersyscall is going to return immediately after.
  2107  //
  2108  // Nothing entersyscall calls can split the stack either.
  2109  // We cannot safely move the stack during an active call to syscall,
  2110  // because we do not know which of the uintptr arguments are
  2111  // really pointers (back into the stack).
  2112  // In practice, this means that we make the fast path run through
  2113  // entersyscall doing no-split things, and the slow path has to use systemstack
  2114  // to run bigger things on the system stack.
  2115  //
  2116  // reentersyscall is the entry point used by cgo callbacks, where explicitly
  2117  // saved SP and PC are restored. This is needed when exitsyscall will be called
  2118  // from a function further up in the call stack than the parent, as g->syscallsp
  2119  // must always point to a valid stack frame. entersyscall below is the normal
  2120  // entry point for syscalls, which obtains the SP and PC from the caller.
  2121  //
  2122  // Syscall tracing:
  2123  // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
  2124  // If the syscall does not block, that is it, we do not emit any other events.
  2125  // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
  2126  // when syscall returns we emit traceGoSysExit and when the goroutine starts running
  2127  // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
  2128  // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
  2129  // we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
  2130  // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
  2131  // and we wait for the increment before emitting traceGoSysExit.
  2132  // Note that the increment is done even if tracing is not enabled,
  2133  // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
  2134  //
  2135  //go:nosplit
  2136  func reentersyscall(pc, sp uintptr) {
  2137  	_g_ := getg()
  2138  
  2139  	// Disable preemption because during this function g is in Gsyscall status,
  2140  	// but can have inconsistent g->sched, do not let GC observe it.
  2141  	_g_.m.locks++
  2142  
  2143  	// Entersyscall must not call any function that might split/grow the stack.
  2144  	// (See details in comment above.)
  2145  	// Catch calls that might, by replacing the stack guard with something that
  2146  	// will trip any stack check and leaving a flag to tell newstack to die.
  2147  	_g_.stackguard0 = stackPreempt
  2148  	_g_.throwsplit = true
  2149  
  2150  	// Leave SP around for GC and traceback.
  2151  	save(pc, sp)
  2152  	_g_.syscallsp = sp
  2153  	_g_.syscallpc = pc
  2154  	casgstatus(_g_, _Grunning, _Gsyscall)
  2155  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2156  		systemstack(func() {
  2157  			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2158  			throw("entersyscall")
  2159  		})
  2160  	}
  2161  
  2162  	if trace.enabled {
  2163  		systemstack(traceGoSysCall)
  2164  		// systemstack itself clobbers g.sched.{pc,sp} and we might
  2165  		// need them later when the G is genuinely blocked in a
  2166  		// syscall
  2167  		save(pc, sp)
  2168  	}
  2169  
  2170  	if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
  2171  		systemstack(entersyscall_sysmon)
  2172  		save(pc, sp)
  2173  	}
  2174  
  2175  	if _g_.m.p.ptr().runSafePointFn != 0 {
  2176  		// runSafePointFn may stack split if run on this stack
  2177  		systemstack(runSafePointFn)
  2178  		save(pc, sp)
  2179  	}
  2180  
  2181  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2182  	_g_.sysblocktraced = true
  2183  	_g_.m.mcache = nil
  2184  	_g_.m.p.ptr().m = 0
  2185  	atomicstore(&_g_.m.p.ptr().status, _Psyscall)
  2186  	if sched.gcwaiting != 0 {
  2187  		systemstack(entersyscall_gcwait)
  2188  		save(pc, sp)
  2189  	}
  2190  
  2191  	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
  2192  	// We set _StackGuard to StackPreempt so that first split stack check calls morestack.
  2193  	// Morestack detects this case and throws.
  2194  	_g_.stackguard0 = stackPreempt
  2195  	_g_.m.locks--
  2196  }
  2197  
  2198  // Standard syscall entry used by the go syscall library and normal cgo calls.
  2199  //go:nosplit
  2200  func entersyscall(dummy int32) {
  2201  	reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  2202  }
  2203  
  2204  func entersyscall_sysmon() {
  2205  	lock(&sched.lock)
  2206  	if atomicload(&sched.sysmonwait) != 0 {
  2207  		atomicstore(&sched.sysmonwait, 0)
  2208  		notewakeup(&sched.sysmonnote)
  2209  	}
  2210  	unlock(&sched.lock)
  2211  }
  2212  
  2213  func entersyscall_gcwait() {
  2214  	_g_ := getg()
  2215  	_p_ := _g_.m.p.ptr()
  2216  
  2217  	lock(&sched.lock)
  2218  	if sched.stopwait > 0 && cas(&_p_.status, _Psyscall, _Pgcstop) {
  2219  		if trace.enabled {
  2220  			traceGoSysBlock(_p_)
  2221  			traceProcStop(_p_)
  2222  		}
  2223  		_p_.syscalltick++
  2224  		if sched.stopwait--; sched.stopwait == 0 {
  2225  			notewakeup(&sched.stopnote)
  2226  		}
  2227  	}
  2228  	unlock(&sched.lock)
  2229  }
  2230  
  2231  // The same as entersyscall(), but with a hint that the syscall is blocking.
  2232  //go:nosplit
  2233  func entersyscallblock(dummy int32) {
  2234  	_g_ := getg()
  2235  
  2236  	_g_.m.locks++ // see comment in entersyscall
  2237  	_g_.throwsplit = true
  2238  	_g_.stackguard0 = stackPreempt // see comment in entersyscall
  2239  	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
  2240  	_g_.sysblocktraced = true
  2241  	_g_.m.p.ptr().syscalltick++
  2242  
  2243  	// Leave SP around for GC and traceback.
  2244  	pc := getcallerpc(unsafe.Pointer(&dummy))
  2245  	sp := getcallersp(unsafe.Pointer(&dummy))
  2246  	save(pc, sp)
  2247  	_g_.syscallsp = _g_.sched.sp
  2248  	_g_.syscallpc = _g_.sched.pc
  2249  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2250  		sp1 := sp
  2251  		sp2 := _g_.sched.sp
  2252  		sp3 := _g_.syscallsp
  2253  		systemstack(func() {
  2254  			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2255  			throw("entersyscallblock")
  2256  		})
  2257  	}
  2258  	casgstatus(_g_, _Grunning, _Gsyscall)
  2259  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  2260  		systemstack(func() {
  2261  			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  2262  			throw("entersyscallblock")
  2263  		})
  2264  	}
  2265  
  2266  	systemstack(entersyscallblock_handoff)
  2267  
  2268  	// Resave for traceback during blocked call.
  2269  	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  2270  
  2271  	_g_.m.locks--
  2272  }
  2273  
  2274  func entersyscallblock_handoff() {
  2275  	if trace.enabled {
  2276  		traceGoSysCall()
  2277  		traceGoSysBlock(getg().m.p.ptr())
  2278  	}
  2279  	handoffp(releasep())
  2280  }
  2281  
  2282  // The goroutine g exited its system call.
  2283  // Arrange for it to run on a cpu again.
  2284  // This is called only from the go syscall library, not
  2285  // from the low-level system calls used by the
  2286  //go:nosplit
  2287  func exitsyscall(dummy int32) {
  2288  	_g_ := getg()
  2289  
  2290  	_g_.m.locks++ // see comment in entersyscall
  2291  	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
  2292  		throw("exitsyscall: syscall frame is no longer valid")
  2293  	}
  2294  
  2295  	_g_.waitsince = 0
  2296  	oldp := _g_.m.p.ptr()
  2297  	if exitsyscallfast() {
  2298  		if _g_.m.mcache == nil {
  2299  			throw("lost mcache")
  2300  		}
  2301  		if trace.enabled {
  2302  			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  2303  				systemstack(traceGoStart)
  2304  			}
  2305  		}
  2306  		// There's a cpu for us, so we can run.
  2307  		_g_.m.p.ptr().syscalltick++
  2308  		// We need to cas the status and scan before resuming...
  2309  		casgstatus(_g_, _Gsyscall, _Grunning)
  2310  
  2311  		// Garbage collector isn't running (since we are),
  2312  		// so okay to clear syscallsp.
  2313  		_g_.syscallsp = 0
  2314  		_g_.m.locks--
  2315  		if _g_.preempt {
  2316  			// restore the preemption request in case we've cleared it in newstack
  2317  			_g_.stackguard0 = stackPreempt
  2318  		} else {
  2319  			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
  2320  			_g_.stackguard0 = _g_.stack.lo + _StackGuard
  2321  		}
  2322  		_g_.throwsplit = false
  2323  		return
  2324  	}
  2325  
  2326  	_g_.sysexitticks = 0
  2327  	_g_.sysexitseq = 0
  2328  	if trace.enabled {
  2329  		// Wait till traceGoSysBlock event is emitted.
  2330  		// This ensures consistency of the trace (the goroutine is started after it is blocked).
  2331  		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
  2332  			osyield()
  2333  		}
  2334  		// We can't trace syscall exit right now because we don't have a P.
  2335  		// Tracing code can invoke write barriers that cannot run without a P.
  2336  		// So instead we remember the syscall exit time and emit the event
  2337  		// in execute when we have a P.
  2338  		_g_.sysexitseq, _g_.sysexitticks = tracestamp()
  2339  	}
  2340  
  2341  	_g_.m.locks--
  2342  
  2343  	// Call the scheduler.
  2344  	mcall(exitsyscall0)
  2345  
  2346  	if _g_.m.mcache == nil {
  2347  		throw("lost mcache")
  2348  	}
  2349  
  2350  	// Scheduler returned, so we're allowed to run now.
  2351  	// Delete the syscallsp information that we left for
  2352  	// the garbage collector during the system call.
  2353  	// Must wait until now because until gosched returns
  2354  	// we don't know for sure that the garbage collector
  2355  	// is not running.
  2356  	_g_.syscallsp = 0
  2357  	_g_.m.p.ptr().syscalltick++
  2358  	_g_.throwsplit = false
  2359  }
  2360  
  2361  //go:nosplit
  2362  func exitsyscallfast() bool {
  2363  	_g_ := getg()
  2364  
  2365  	// Freezetheworld sets stopwait but does not retake P's.
  2366  	if sched.stopwait == freezeStopWait {
  2367  		_g_.m.mcache = nil
  2368  		_g_.m.p = 0
  2369  		return false
  2370  	}
  2371  
  2372  	// Try to re-acquire the last P.
  2373  	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
  2374  		// There's a cpu for us, so we can run.
  2375  		_g_.m.mcache = _g_.m.p.ptr().mcache
  2376  		_g_.m.p.ptr().m.set(_g_.m)
  2377  		if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
  2378  			if trace.enabled {
  2379  				// The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
  2380  				// traceGoSysBlock for this syscall was already emitted,
  2381  				// but here we effectively retake the p from the new syscall running on the same p.
  2382  				systemstack(func() {
  2383  					// Denote blocking of the new syscall.
  2384  					traceGoSysBlock(_g_.m.p.ptr())
  2385  					// Denote completion of the current syscall.
  2386  					traceGoSysExit(tracestamp())
  2387  				})
  2388  			}
  2389  			_g_.m.p.ptr().syscalltick++
  2390  		}
  2391  		return true
  2392  	}
  2393  
  2394  	// Try to get any other idle P.
  2395  	oldp := _g_.m.p.ptr()
  2396  	_g_.m.mcache = nil
  2397  	_g_.m.p = 0
  2398  	if sched.pidle != 0 {
  2399  		var ok bool
  2400  		systemstack(func() {
  2401  			ok = exitsyscallfast_pidle()
  2402  			if ok && trace.enabled {
  2403  				if oldp != nil {
  2404  					// Wait till traceGoSysBlock event is emitted.
  2405  					// This ensures consistency of the trace (the goroutine is started after it is blocked).
  2406  					for oldp.syscalltick == _g_.m.syscalltick {
  2407  						osyield()
  2408  					}
  2409  				}
  2410  				traceGoSysExit(tracestamp())
  2411  			}
  2412  		})
  2413  		if ok {
  2414  			return true
  2415  		}
  2416  	}
  2417  	return false
  2418  }
  2419  
  2420  func exitsyscallfast_pidle() bool {
  2421  	lock(&sched.lock)
  2422  	_p_ := pidleget()
  2423  	if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
  2424  		atomicstore(&sched.sysmonwait, 0)
  2425  		notewakeup(&sched.sysmonnote)
  2426  	}
  2427  	unlock(&sched.lock)
  2428  	if _p_ != nil {
  2429  		acquirep(_p_)
  2430  		return true
  2431  	}
  2432  	return false
  2433  }
  2434  
  2435  // exitsyscall slow path on g0.
  2436  // Failed to acquire P, enqueue gp as runnable.
  2437  func exitsyscall0(gp *g) {
  2438  	_g_ := getg()
  2439  
  2440  	casgstatus(gp, _Gsyscall, _Grunnable)
  2441  	dropg()
  2442  	lock(&sched.lock)
  2443  	_p_ := pidleget()
  2444  	if _p_ == nil {
  2445  		globrunqput(gp)
  2446  	} else if atomicload(&sched.sysmonwait) != 0 {
  2447  		atomicstore(&sched.sysmonwait, 0)
  2448  		notewakeup(&sched.sysmonnote)
  2449  	}
  2450  	unlock(&sched.lock)
  2451  	if _p_ != nil {
  2452  		acquirep(_p_)
  2453  		execute(gp, false) // Never returns.
  2454  	}
  2455  	if _g_.m.lockedg != nil {
  2456  		// Wait until another thread schedules gp and so m again.
  2457  		stoplockedm()
  2458  		execute(gp, false) // Never returns.
  2459  	}
  2460  	stopm()
  2461  	schedule() // Never returns.
  2462  }
  2463  
  2464  func beforefork() {
  2465  	gp := getg().m.curg
  2466  
  2467  	// Fork can hang if preempted with signals frequently enough (see issue 5517).
  2468  	// Ensure that we stay on the same M where we disable profiling.
  2469  	gp.m.locks++
  2470  	if gp.m.profilehz != 0 {
  2471  		resetcpuprofiler(0)
  2472  	}
  2473  
  2474  	// This function is called before fork in syscall package.
  2475  	// Code between fork and exec must not allocate memory nor even try to grow stack.
  2476  	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
  2477  	// runtime_AfterFork will undo this in parent process, but not in child.
  2478  	gp.stackguard0 = stackFork
  2479  }
  2480  
  2481  // Called from syscall package before fork.
  2482  //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
  2483  //go:nosplit
  2484  func syscall_runtime_BeforeFork() {
  2485  	systemstack(beforefork)
  2486  }
  2487  
  2488  func afterfork() {
  2489  	gp := getg().m.curg
  2490  
  2491  	// See the comment in beforefork.
  2492  	gp.stackguard0 = gp.stack.lo + _StackGuard
  2493  
  2494  	hz := sched.profilehz
  2495  	if hz != 0 {
  2496  		resetcpuprofiler(hz)
  2497  	}
  2498  	gp.m.locks--
  2499  }
  2500  
  2501  // Called from syscall package after fork in parent.
  2502  //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
  2503  //go:nosplit
  2504  func syscall_runtime_AfterFork() {
  2505  	systemstack(afterfork)
  2506  }
  2507  
  2508  // Allocate a new g, with a stack big enough for stacksize bytes.
  2509  func malg(stacksize int32) *g {
  2510  	newg := new(g)
  2511  	if stacksize >= 0 {
  2512  		stacksize = round2(_StackSystem + stacksize)
  2513  		systemstack(func() {
  2514  			newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
  2515  		})
  2516  		newg.stackguard0 = newg.stack.lo + _StackGuard
  2517  		newg.stackguard1 = ^uintptr(0)
  2518  		newg.stackAlloc = uintptr(stacksize)
  2519  	}
  2520  	return newg
  2521  }
  2522  
  2523  // Create a new g running fn with siz bytes of arguments.
  2524  // Put it on the queue of g's waiting to run.
  2525  // The compiler turns a go statement into a call to this.
  2526  // Cannot split the stack because it assumes that the arguments
  2527  // are available sequentially after &fn; they would not be
  2528  // copied if a stack split occurred.
  2529  //go:nosplit
  2530  func newproc(siz int32, fn *funcval) {
  2531  	argp := add(unsafe.Pointer(&fn), ptrSize)
  2532  	pc := getcallerpc(unsafe.Pointer(&siz))
  2533  	systemstack(func() {
  2534  		newproc1(fn, (*uint8)(argp), siz, 0, pc)
  2535  	})
  2536  }
  2537  
  2538  // Create a new g running fn with narg bytes of arguments starting
  2539  // at argp and returning nret bytes of results.  callerpc is the
  2540  // address of the go statement that created this.  The new g is put
  2541  // on the queue of g's waiting to run.
  2542  func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
  2543  	_g_ := getg()
  2544  
  2545  	if fn == nil {
  2546  		_g_.m.throwing = -1 // do not dump full stacks
  2547  		throw("go of nil func value")
  2548  	}
  2549  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
  2550  	siz := narg + nret
  2551  	siz = (siz + 7) &^ 7
  2552  
  2553  	// We could allocate a larger initial stack if necessary.
  2554  	// Not worth it: this is almost always an error.
  2555  	// 4*sizeof(uintreg): extra space added below
  2556  	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
  2557  	if siz >= _StackMin-4*regSize-regSize {
  2558  		throw("newproc: function arguments too large for new goroutine")
  2559  	}
  2560  
  2561  	_p_ := _g_.m.p.ptr()
  2562  	newg := gfget(_p_)
  2563  	if newg == nil {
  2564  		newg = malg(_StackMin)
  2565  		casgstatus(newg, _Gidle, _Gdead)
  2566  		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  2567  	}
  2568  	if newg.stack.hi == 0 {
  2569  		throw("newproc1: newg missing stack")
  2570  	}
  2571  
  2572  	if readgstatus(newg) != _Gdead {
  2573  		throw("newproc1: new g is not Gdead")
  2574  	}
  2575  
  2576  	totalSize := 4*regSize + uintptr(siz) + minFrameSize // extra space in case of reads slightly beyond frame
  2577  	totalSize += -totalSize & (spAlign - 1)              // align to spAlign
  2578  	sp := newg.stack.hi - totalSize
  2579  	spArg := sp
  2580  	if usesLR {
  2581  		// caller's LR
  2582  		*(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
  2583  		spArg += minFrameSize
  2584  	}
  2585  	memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
  2586  
  2587  	memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
  2588  	newg.sched.sp = sp
  2589  	newg.stktopsp = sp
  2590  	newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
  2591  	newg.sched.g = guintptr(unsafe.Pointer(newg))
  2592  	gostartcallfn(&newg.sched, fn)
  2593  	newg.gopc = callerpc
  2594  	newg.startpc = fn.fn
  2595  	casgstatus(newg, _Gdead, _Grunnable)
  2596  
  2597  	if _p_.goidcache == _p_.goidcacheend {
  2598  		// Sched.goidgen is the last allocated id,
  2599  		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  2600  		// At startup sched.goidgen=0, so main goroutine receives goid=1.
  2601  		_p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
  2602  		_p_.goidcache -= _GoidCacheBatch - 1
  2603  		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
  2604  	}
  2605  	newg.goid = int64(_p_.goidcache)
  2606  	_p_.goidcache++
  2607  	if raceenabled {
  2608  		newg.racectx = racegostart(callerpc)
  2609  	}
  2610  	if trace.enabled {
  2611  		traceGoCreate(newg, newg.startpc)
  2612  	}
  2613  	runqput(_p_, newg, true)
  2614  
  2615  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
  2616  		wakep()
  2617  	}
  2618  	_g_.m.locks--
  2619  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  2620  		_g_.stackguard0 = stackPreempt
  2621  	}
  2622  	return newg
  2623  }
  2624  
  2625  // Put on gfree list.
  2626  // If local list is too long, transfer a batch to the global list.
  2627  func gfput(_p_ *p, gp *g) {
  2628  	if readgstatus(gp) != _Gdead {
  2629  		throw("gfput: bad status (not Gdead)")
  2630  	}
  2631  
  2632  	stksize := gp.stackAlloc
  2633  
  2634  	if stksize != _FixedStack {
  2635  		// non-standard stack size - free it.
  2636  		stackfree(gp.stack, gp.stackAlloc)
  2637  		gp.stack.lo = 0
  2638  		gp.stack.hi = 0
  2639  		gp.stackguard0 = 0
  2640  		gp.stkbar = nil
  2641  		gp.stkbarPos = 0
  2642  	} else {
  2643  		// Reset stack barriers.
  2644  		gp.stkbar = gp.stkbar[:0]
  2645  		gp.stkbarPos = 0
  2646  	}
  2647  
  2648  	gp.schedlink.set(_p_.gfree)
  2649  	_p_.gfree = gp
  2650  	_p_.gfreecnt++
  2651  	if _p_.gfreecnt >= 64 {
  2652  		lock(&sched.gflock)
  2653  		for _p_.gfreecnt >= 32 {
  2654  			_p_.gfreecnt--
  2655  			gp = _p_.gfree
  2656  			_p_.gfree = gp.schedlink.ptr()
  2657  			gp.schedlink.set(sched.gfree)
  2658  			sched.gfree = gp
  2659  			sched.ngfree++
  2660  		}
  2661  		unlock(&sched.gflock)
  2662  	}
  2663  }
  2664  
  2665  // Get from gfree list.
  2666  // If local list is empty, grab a batch from global list.
  2667  func gfget(_p_ *p) *g {
  2668  retry:
  2669  	gp := _p_.gfree
  2670  	if gp == nil && sched.gfree != nil {
  2671  		lock(&sched.gflock)
  2672  		for _p_.gfreecnt < 32 && sched.gfree != nil {
  2673  			_p_.gfreecnt++
  2674  			gp = sched.gfree
  2675  			sched.gfree = gp.schedlink.ptr()
  2676  			sched.ngfree--
  2677  			gp.schedlink.set(_p_.gfree)
  2678  			_p_.gfree = gp
  2679  		}
  2680  		unlock(&sched.gflock)
  2681  		goto retry
  2682  	}
  2683  	if gp != nil {
  2684  		_p_.gfree = gp.schedlink.ptr()
  2685  		_p_.gfreecnt--
  2686  		if gp.stack.lo == 0 {
  2687  			// Stack was deallocated in gfput.  Allocate a new one.
  2688  			systemstack(func() {
  2689  				gp.stack, gp.stkbar = stackalloc(_FixedStack)
  2690  			})
  2691  			gp.stackguard0 = gp.stack.lo + _StackGuard
  2692  			gp.stackAlloc = _FixedStack
  2693  		} else {
  2694  			if raceenabled {
  2695  				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
  2696  			}
  2697  			if msanenabled {
  2698  				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
  2699  			}
  2700  		}
  2701  	}
  2702  	return gp
  2703  }
  2704  
  2705  // Purge all cached G's from gfree list to the global list.
  2706  func gfpurge(_p_ *p) {
  2707  	lock(&sched.gflock)
  2708  	for _p_.gfreecnt != 0 {
  2709  		_p_.gfreecnt--
  2710  		gp := _p_.gfree
  2711  		_p_.gfree = gp.schedlink.ptr()
  2712  		gp.schedlink.set(sched.gfree)
  2713  		sched.gfree = gp
  2714  		sched.ngfree++
  2715  	}
  2716  	unlock(&sched.gflock)
  2717  }
  2718  
  2719  // Breakpoint executes a breakpoint trap.
  2720  func Breakpoint() {
  2721  	breakpoint()
  2722  }
  2723  
  2724  // dolockOSThread is called by LockOSThread and lockOSThread below
  2725  // after they modify m.locked. Do not allow preemption during this call,
  2726  // or else the m might be different in this function than in the caller.
  2727  //go:nosplit
  2728  func dolockOSThread() {
  2729  	_g_ := getg()
  2730  	_g_.m.lockedg = _g_
  2731  	_g_.lockedm = _g_.m
  2732  }
  2733  
  2734  //go:nosplit
  2735  
  2736  // LockOSThread wires the calling goroutine to its current operating system thread.
  2737  // Until the calling goroutine exits or calls UnlockOSThread, it will always
  2738  // execute in that thread, and no other goroutine can.
  2739  func LockOSThread() {
  2740  	getg().m.locked |= _LockExternal
  2741  	dolockOSThread()
  2742  }
  2743  
  2744  //go:nosplit
  2745  func lockOSThread() {
  2746  	getg().m.locked += _LockInternal
  2747  	dolockOSThread()
  2748  }
  2749  
  2750  // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
  2751  // after they update m->locked. Do not allow preemption during this call,
  2752  // or else the m might be in different in this function than in the caller.
  2753  //go:nosplit
  2754  func dounlockOSThread() {
  2755  	_g_ := getg()
  2756  	if _g_.m.locked != 0 {
  2757  		return
  2758  	}
  2759  	_g_.m.lockedg = nil
  2760  	_g_.lockedm = nil
  2761  }
  2762  
  2763  //go:nosplit
  2764  
  2765  // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
  2766  // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
  2767  func UnlockOSThread() {
  2768  	getg().m.locked &^= _LockExternal
  2769  	dounlockOSThread()
  2770  }
  2771  
  2772  //go:nosplit
  2773  func unlockOSThread() {
  2774  	_g_ := getg()
  2775  	if _g_.m.locked < _LockInternal {
  2776  		systemstack(badunlockosthread)
  2777  	}
  2778  	_g_.m.locked -= _LockInternal
  2779  	dounlockOSThread()
  2780  }
  2781  
  2782  func badunlockosthread() {
  2783  	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
  2784  }
  2785  
  2786  func gcount() int32 {
  2787  	n := int32(allglen) - sched.ngfree
  2788  	for i := 0; ; i++ {
  2789  		_p_ := allp[i]
  2790  		if _p_ == nil {
  2791  			break
  2792  		}
  2793  		n -= _p_.gfreecnt
  2794  	}
  2795  
  2796  	// All these variables can be changed concurrently, so the result can be inconsistent.
  2797  	// But at least the current goroutine is running.
  2798  	if n < 1 {
  2799  		n = 1
  2800  	}
  2801  	return n
  2802  }
  2803  
  2804  func mcount() int32 {
  2805  	return sched.mcount
  2806  }
  2807  
  2808  var prof struct {
  2809  	lock uint32
  2810  	hz   int32
  2811  }
  2812  
  2813  func _System()       { _System() }
  2814  func _ExternalCode() { _ExternalCode() }
  2815  func _GC()           { _GC() }
  2816  
  2817  // Called if we receive a SIGPROF signal.
  2818  func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
  2819  	if prof.hz == 0 {
  2820  		return
  2821  	}
  2822  
  2823  	// Profiling runs concurrently with GC, so it must not allocate.
  2824  	mp.mallocing++
  2825  
  2826  	// Coordinate with stack barrier insertion in scanstack.
  2827  	for !cas(&gp.stackLock, 0, 1) {
  2828  		osyield()
  2829  	}
  2830  
  2831  	// Define that a "user g" is a user-created goroutine, and a "system g"
  2832  	// is one that is m->g0 or m->gsignal.
  2833  	//
  2834  	// We might be interrupted for profiling halfway through a
  2835  	// goroutine switch. The switch involves updating three (or four) values:
  2836  	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
  2837  	// because once it gets updated the new g is running.
  2838  	//
  2839  	// When switching from a user g to a system g, LR is not considered live,
  2840  	// so the update only affects g, SP, and PC. Since PC must be last, there
  2841  	// the possible partial transitions in ordinary execution are (1) g alone is updated,
  2842  	// (2) both g and SP are updated, and (3) SP alone is updated.
  2843  	// If SP or g alone is updated, we can detect the partial transition by checking
  2844  	// whether the SP is within g's stack bounds. (We could also require that SP
  2845  	// be changed only after g, but the stack bounds check is needed by other
  2846  	// cases, so there is no need to impose an additional requirement.)
  2847  	//
  2848  	// There is one exceptional transition to a system g, not in ordinary execution.
  2849  	// When a signal arrives, the operating system starts the signal handler running
  2850  	// with an updated PC and SP. The g is updated last, at the beginning of the
  2851  	// handler. There are two reasons this is okay. First, until g is updated the
  2852  	// g and SP do not match, so the stack bounds check detects the partial transition.
  2853  	// Second, signal handlers currently run with signals disabled, so a profiling
  2854  	// signal cannot arrive during the handler.
  2855  	//
  2856  	// When switching from a system g to a user g, there are three possibilities.
  2857  	//
  2858  	// First, it may be that the g switch has no PC update, because the SP
  2859  	// either corresponds to a user g throughout (as in asmcgocall)
  2860  	// or because it has been arranged to look like a user g frame
  2861  	// (as in cgocallback_gofunc). In this case, since the entire
  2862  	// transition is a g+SP update, a partial transition updating just one of
  2863  	// those will be detected by the stack bounds check.
  2864  	//
  2865  	// Second, when returning from a signal handler, the PC and SP updates
  2866  	// are performed by the operating system in an atomic update, so the g
  2867  	// update must be done before them. The stack bounds check detects
  2868  	// the partial transition here, and (again) signal handlers run with signals
  2869  	// disabled, so a profiling signal cannot arrive then anyway.
  2870  	//
  2871  	// Third, the common case: it may be that the switch updates g, SP, and PC
  2872  	// separately. If the PC is within any of the functions that does this,
  2873  	// we don't ask for a traceback. C.F. the function setsSP for more about this.
  2874  	//
  2875  	// There is another apparently viable approach, recorded here in case
  2876  	// the "PC within setsSP function" check turns out not to be usable.
  2877  	// It would be possible to delay the update of either g or SP until immediately
  2878  	// before the PC update instruction. Then, because of the stack bounds check,
  2879  	// the only problematic interrupt point is just before that PC update instruction,
  2880  	// and the sigprof handler can detect that instruction and simulate stepping past
  2881  	// it in order to reach a consistent state. On ARM, the update of g must be made
  2882  	// in two places (in R10 and also in a TLS slot), so the delayed update would
  2883  	// need to be the SP update. The sigprof handler must read the instruction at
  2884  	// the current PC and if it was the known instruction (for example, JMP BX or
  2885  	// MOV R2, PC), use that other register in place of the PC value.
  2886  	// The biggest drawback to this solution is that it requires that we can tell
  2887  	// whether it's safe to read from the memory pointed at by PC.
  2888  	// In a correct program, we can test PC == nil and otherwise read,
  2889  	// but if a profiling signal happens at the instant that a program executes
  2890  	// a bad jump (before the program manages to handle the resulting fault)
  2891  	// the profiling handler could fault trying to read nonexistent memory.
  2892  	//
  2893  	// To recap, there are no constraints on the assembly being used for the
  2894  	// transition. We simply require that g and SP match and that the PC is not
  2895  	// in gogo.
  2896  	traceback := true
  2897  	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
  2898  		traceback = false
  2899  	}
  2900  	var stk [maxCPUProfStack]uintptr
  2901  	n := 0
  2902  	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
  2903  		// Cgo, we can't unwind and symbolize arbitrary C code,
  2904  		// so instead collect Go stack that leads to the cgo call.
  2905  		// This is especially important on windows, since all syscalls are cgo calls.
  2906  		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
  2907  	} else if traceback {
  2908  		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
  2909  	}
  2910  	if !traceback || n <= 0 {
  2911  		// Normal traceback is impossible or has failed.
  2912  		// See if it falls into several common cases.
  2913  		n = 0
  2914  		if GOOS == "windows" && n == 0 && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
  2915  			// Libcall, i.e. runtime syscall on windows.
  2916  			// Collect Go stack that leads to the call.
  2917  			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
  2918  		}
  2919  		if n == 0 {
  2920  			// If all of the above has failed, account it against abstract "System" or "GC".
  2921  			n = 2
  2922  			// "ExternalCode" is better than "etext".
  2923  			if pc > firstmoduledata.etext {
  2924  				pc = funcPC(_ExternalCode) + _PCQuantum
  2925  			}
  2926  			stk[0] = pc
  2927  			if mp.preemptoff != "" || mp.helpgc != 0 {
  2928  				stk[1] = funcPC(_GC) + _PCQuantum
  2929  			} else {
  2930  				stk[1] = funcPC(_System) + _PCQuantum
  2931  			}
  2932  		}
  2933  	}
  2934  	atomicstore(&gp.stackLock, 0)
  2935  
  2936  	if prof.hz != 0 {
  2937  		// Simple cas-lock to coordinate with setcpuprofilerate.
  2938  		for !cas(&prof.lock, 0, 1) {
  2939  			osyield()
  2940  		}
  2941  		if prof.hz != 0 {
  2942  			cpuprof.add(stk[:n])
  2943  		}
  2944  		atomicstore(&prof.lock, 0)
  2945  	}
  2946  	mp.mallocing--
  2947  }
  2948  
  2949  // Reports whether a function will set the SP
  2950  // to an absolute value. Important that
  2951  // we don't traceback when these are at the bottom
  2952  // of the stack since we can't be sure that we will
  2953  // find the caller.
  2954  //
  2955  // If the function is not on the bottom of the stack
  2956  // we assume that it will have set it up so that traceback will be consistent,
  2957  // either by being a traceback terminating function
  2958  // or putting one on the stack at the right offset.
  2959  func setsSP(pc uintptr) bool {
  2960  	f := findfunc(pc)
  2961  	if f == nil {
  2962  		// couldn't find the function for this PC,
  2963  		// so assume the worst and stop traceback
  2964  		return true
  2965  	}
  2966  	switch f.entry {
  2967  	case gogoPC, systemstackPC, mcallPC, morestackPC:
  2968  		return true
  2969  	}
  2970  	return false
  2971  }
  2972  
  2973  // Arrange to call fn with a traceback hz times a second.
  2974  func setcpuprofilerate_m(hz int32) {
  2975  	// Force sane arguments.
  2976  	if hz < 0 {
  2977  		hz = 0
  2978  	}
  2979  
  2980  	// Disable preemption, otherwise we can be rescheduled to another thread
  2981  	// that has profiling enabled.
  2982  	_g_ := getg()
  2983  	_g_.m.locks++
  2984  
  2985  	// Stop profiler on this thread so that it is safe to lock prof.
  2986  	// if a profiling signal came in while we had prof locked,
  2987  	// it would deadlock.
  2988  	resetcpuprofiler(0)
  2989  
  2990  	for !cas(&prof.lock, 0, 1) {
  2991  		osyield()
  2992  	}
  2993  	prof.hz = hz
  2994  	atomicstore(&prof.lock, 0)
  2995  
  2996  	lock(&sched.lock)
  2997  	sched.profilehz = hz
  2998  	unlock(&sched.lock)
  2999  
  3000  	if hz != 0 {
  3001  		resetcpuprofiler(hz)
  3002  	}
  3003  
  3004  	_g_.m.locks--
  3005  }
  3006  
  3007  // Change number of processors.  The world is stopped, sched is locked.
  3008  // gcworkbufs are not being modified by either the GC or
  3009  // the write barrier code.
  3010  // Returns list of Ps with local work, they need to be scheduled by the caller.
  3011  func procresize(nprocs int32) *p {
  3012  	old := gomaxprocs
  3013  	if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
  3014  		throw("procresize: invalid arg")
  3015  	}
  3016  	if trace.enabled {
  3017  		traceGomaxprocs(nprocs)
  3018  	}
  3019  
  3020  	// update statistics
  3021  	now := nanotime()
  3022  	if sched.procresizetime != 0 {
  3023  		sched.totaltime += int64(old) * (now - sched.procresizetime)
  3024  	}
  3025  	sched.procresizetime = now
  3026  
  3027  	// initialize new P's
  3028  	for i := int32(0); i < nprocs; i++ {
  3029  		pp := allp[i]
  3030  		if pp == nil {
  3031  			pp = new(p)
  3032  			pp.id = i
  3033  			pp.status = _Pgcstop
  3034  			pp.sudogcache = pp.sudogbuf[:0]
  3035  			for i := range pp.deferpool {
  3036  				pp.deferpool[i] = pp.deferpoolbuf[i][:0]
  3037  			}
  3038  			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
  3039  		}
  3040  		if pp.mcache == nil {
  3041  			if old == 0 && i == 0 {
  3042  				if getg().m.mcache == nil {
  3043  					throw("missing mcache?")
  3044  				}
  3045  				pp.mcache = getg().m.mcache // bootstrap
  3046  			} else {
  3047  				pp.mcache = allocmcache()
  3048  			}
  3049  		}
  3050  	}
  3051  
  3052  	// free unused P's
  3053  	for i := nprocs; i < old; i++ {
  3054  		p := allp[i]
  3055  		if trace.enabled {
  3056  			if p == getg().m.p.ptr() {
  3057  				// moving to p[0], pretend that we were descheduled
  3058  				// and then scheduled again to keep the trace sane.
  3059  				traceGoSched()
  3060  				traceProcStop(p)
  3061  			}
  3062  		}
  3063  		// move all runnable goroutines to the global queue
  3064  		for p.runqhead != p.runqtail {
  3065  			// pop from tail of local queue
  3066  			p.runqtail--
  3067  			gp := p.runq[p.runqtail%uint32(len(p.runq))]
  3068  			// push onto head of global queue
  3069  			globrunqputhead(gp)
  3070  		}
  3071  		if p.runnext != 0 {
  3072  			globrunqputhead(p.runnext.ptr())
  3073  			p.runnext = 0
  3074  		}
  3075  		// if there's a background worker, make it runnable and put
  3076  		// it on the global queue so it can clean itself up
  3077  		if p.gcBgMarkWorker != nil {
  3078  			casgstatus(p.gcBgMarkWorker, _Gwaiting, _Grunnable)
  3079  			if trace.enabled {
  3080  				traceGoUnpark(p.gcBgMarkWorker, 0)
  3081  			}
  3082  			globrunqput(p.gcBgMarkWorker)
  3083  			p.gcBgMarkWorker = nil
  3084  		}
  3085  		for i := range p.sudogbuf {
  3086  			p.sudogbuf[i] = nil
  3087  		}
  3088  		p.sudogcache = p.sudogbuf[:0]
  3089  		for i := range p.deferpool {
  3090  			for j := range p.deferpoolbuf[i] {
  3091  				p.deferpoolbuf[i][j] = nil
  3092  			}
  3093  			p.deferpool[i] = p.deferpoolbuf[i][:0]
  3094  		}
  3095  		freemcache(p.mcache)
  3096  		p.mcache = nil
  3097  		gfpurge(p)
  3098  		traceProcFree(p)
  3099  		p.status = _Pdead
  3100  		// can't free P itself because it can be referenced by an M in syscall
  3101  	}
  3102  
  3103  	_g_ := getg()
  3104  	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
  3105  		// continue to use the current P
  3106  		_g_.m.p.ptr().status = _Prunning
  3107  	} else {
  3108  		// release the current P and acquire allp[0]
  3109  		if _g_.m.p != 0 {
  3110  			_g_.m.p.ptr().m = 0
  3111  		}
  3112  		_g_.m.p = 0
  3113  		_g_.m.mcache = nil
  3114  		p := allp[0]
  3115  		p.m = 0
  3116  		p.status = _Pidle
  3117  		acquirep(p)
  3118  		if trace.enabled {
  3119  			traceGoStart()
  3120  		}
  3121  	}
  3122  	var runnablePs *p
  3123  	for i := nprocs - 1; i >= 0; i-- {
  3124  		p := allp[i]
  3125  		if _g_.m.p.ptr() == p {
  3126  			continue
  3127  		}
  3128  		p.status = _Pidle
  3129  		if runqempty(p) {
  3130  			pidleput(p)
  3131  		} else {
  3132  			p.m.set(mget())
  3133  			p.link.set(runnablePs)
  3134  			runnablePs = p
  3135  		}
  3136  	}
  3137  	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
  3138  	atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
  3139  	return runnablePs
  3140  }
  3141  
  3142  // Associate p and the current m.
  3143  func acquirep(_p_ *p) {
  3144  	acquirep1(_p_)
  3145  
  3146  	// have p; write barriers now allowed
  3147  	_g_ := getg()
  3148  	_g_.m.mcache = _p_.mcache
  3149  
  3150  	if trace.enabled {
  3151  		traceProcStart()
  3152  	}
  3153  }
  3154  
  3155  // May run during STW, so write barriers are not allowed.
  3156  //go:nowritebarrier
  3157  func acquirep1(_p_ *p) {
  3158  	_g_ := getg()
  3159  
  3160  	if _g_.m.p != 0 || _g_.m.mcache != nil {
  3161  		throw("acquirep: already in go")
  3162  	}
  3163  	if _p_.m != 0 || _p_.status != _Pidle {
  3164  		id := int32(0)
  3165  		if _p_.m != 0 {
  3166  			id = _p_.m.ptr().id
  3167  		}
  3168  		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
  3169  		throw("acquirep: invalid p state")
  3170  	}
  3171  	_g_.m.p.set(_p_)
  3172  	_p_.m.set(_g_.m)
  3173  	_p_.status = _Prunning
  3174  }
  3175  
  3176  // Disassociate p and the current m.
  3177  func releasep() *p {
  3178  	_g_ := getg()
  3179  
  3180  	if _g_.m.p == 0 || _g_.m.mcache == nil {
  3181  		throw("releasep: invalid arg")
  3182  	}
  3183  	_p_ := _g_.m.p.ptr()
  3184  	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
  3185  		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
  3186  		throw("releasep: invalid p state")
  3187  	}
  3188  	if trace.enabled {
  3189  		traceProcStop(_g_.m.p.ptr())
  3190  	}
  3191  	_g_.m.p = 0
  3192  	_g_.m.mcache = nil
  3193  	_p_.m = 0
  3194  	_p_.status = _Pidle
  3195  	return _p_
  3196  }
  3197  
  3198  func incidlelocked(v int32) {
  3199  	lock(&sched.lock)
  3200  	sched.nmidlelocked += v
  3201  	if v > 0 {
  3202  		checkdead()
  3203  	}
  3204  	unlock(&sched.lock)
  3205  }
  3206  
  3207  // Check for deadlock situation.
  3208  // The check is based on number of running M's, if 0 -> deadlock.
  3209  func checkdead() {
  3210  	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
  3211  	// there are no running goroutines.  The calling program is
  3212  	// assumed to be running.
  3213  	if islibrary || isarchive {
  3214  		return
  3215  	}
  3216  
  3217  	// If we are dying because of a signal caught on an already idle thread,
  3218  	// freezetheworld will cause all running threads to block.
  3219  	// And runtime will essentially enter into deadlock state,
  3220  	// except that there is a thread that will call exit soon.
  3221  	if panicking > 0 {
  3222  		return
  3223  	}
  3224  
  3225  	// -1 for sysmon
  3226  	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
  3227  	if run > 0 {
  3228  		return
  3229  	}
  3230  	if run < 0 {
  3231  		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
  3232  		throw("checkdead: inconsistent counts")
  3233  	}
  3234  
  3235  	grunning := 0
  3236  	lock(&allglock)
  3237  	for i := 0; i < len(allgs); i++ {
  3238  		gp := allgs[i]
  3239  		if isSystemGoroutine(gp) {
  3240  			continue
  3241  		}
  3242  		s := readgstatus(gp)
  3243  		switch s &^ _Gscan {
  3244  		case _Gwaiting:
  3245  			grunning++
  3246  		case _Grunnable,
  3247  			_Grunning,
  3248  			_Gsyscall:
  3249  			unlock(&allglock)
  3250  			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
  3251  			throw("checkdead: runnable g")
  3252  		}
  3253  	}
  3254  	unlock(&allglock)
  3255  	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
  3256  		throw("no goroutines (main called runtime.Goexit) - deadlock!")
  3257  	}
  3258  
  3259  	// Maybe jump time forward for playground.
  3260  	gp := timejump()
  3261  	if gp != nil {
  3262  		casgstatus(gp, _Gwaiting, _Grunnable)
  3263  		globrunqput(gp)
  3264  		_p_ := pidleget()
  3265  		if _p_ == nil {
  3266  			throw("checkdead: no p for timer")
  3267  		}
  3268  		mp := mget()
  3269  		if mp == nil {
  3270  			newm(nil, _p_)
  3271  		} else {
  3272  			mp.nextp.set(_p_)
  3273  			notewakeup(&mp.park)
  3274  		}
  3275  		return
  3276  	}
  3277  
  3278  	getg().m.throwing = -1 // do not dump full stacks
  3279  	throw("all goroutines are asleep - deadlock!")
  3280  }
  3281  
  3282  // forcegcperiod is the maximum time in nanoseconds between garbage
  3283  // collections. If we go this long without a garbage collection, one
  3284  // is forced to run.
  3285  //
  3286  // This is a variable for testing purposes. It normally doesn't change.
  3287  var forcegcperiod int64 = 2 * 60 * 1e9
  3288  
  3289  func sysmon() {
  3290  	// If a heap span goes unused for 5 minutes after a garbage collection,
  3291  	// we hand it back to the operating system.
  3292  	scavengelimit := int64(5 * 60 * 1e9)
  3293  
  3294  	if debug.scavenge > 0 {
  3295  		// Scavenge-a-lot for testing.
  3296  		forcegcperiod = 10 * 1e6
  3297  		scavengelimit = 20 * 1e6
  3298  	}
  3299  
  3300  	lastscavenge := nanotime()
  3301  	nscavenge := 0
  3302  
  3303  	lasttrace := int64(0)
  3304  	idle := 0 // how many cycles in succession we had not wokeup somebody
  3305  	delay := uint32(0)
  3306  	for {
  3307  		if idle == 0 { // start with 20us sleep...
  3308  			delay = 20
  3309  		} else if idle > 50 { // start doubling the sleep after 1ms...
  3310  			delay *= 2
  3311  		}
  3312  		if delay > 10*1000 { // up to 10ms
  3313  			delay = 10 * 1000
  3314  		}
  3315  		usleep(delay)
  3316  		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
  3317  			lock(&sched.lock)
  3318  			if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
  3319  				atomicstore(&sched.sysmonwait, 1)
  3320  				unlock(&sched.lock)
  3321  				// Make wake-up period small enough
  3322  				// for the sampling to be correct.
  3323  				maxsleep := forcegcperiod / 2
  3324  				if scavengelimit < forcegcperiod {
  3325  					maxsleep = scavengelimit / 2
  3326  				}
  3327  				notetsleep(&sched.sysmonnote, maxsleep)
  3328  				lock(&sched.lock)
  3329  				atomicstore(&sched.sysmonwait, 0)
  3330  				noteclear(&sched.sysmonnote)
  3331  				idle = 0
  3332  				delay = 20
  3333  			}
  3334  			unlock(&sched.lock)
  3335  		}
  3336  		// poll network if not polled for more than 10ms
  3337  		lastpoll := int64(atomicload64(&sched.lastpoll))
  3338  		now := nanotime()
  3339  		unixnow := unixnanotime()
  3340  		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
  3341  			cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
  3342  			gp := netpoll(false) // non-blocking - returns list of goroutines
  3343  			if gp != nil {
  3344  				// Need to decrement number of idle locked M's
  3345  				// (pretending that one more is running) before injectglist.
  3346  				// Otherwise it can lead to the following situation:
  3347  				// injectglist grabs all P's but before it starts M's to run the P's,
  3348  				// another M returns from syscall, finishes running its G,
  3349  				// observes that there is no work to do and no other running M's
  3350  				// and reports deadlock.
  3351  				incidlelocked(-1)
  3352  				injectglist(gp)
  3353  				incidlelocked(1)
  3354  			}
  3355  		}
  3356  		// retake P's blocked in syscalls
  3357  		// and preempt long running G's
  3358  		if retake(now) != 0 {
  3359  			idle = 0
  3360  		} else {
  3361  			idle++
  3362  		}
  3363  		// check if we need to force a GC
  3364  		lastgc := int64(atomicload64(&memstats.last_gc))
  3365  		if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 && atomicloaduint(&bggc.working) == 0 {
  3366  			lock(&forcegc.lock)
  3367  			forcegc.idle = 0
  3368  			forcegc.g.schedlink = 0
  3369  			injectglist(forcegc.g)
  3370  			unlock(&forcegc.lock)
  3371  		}
  3372  		// scavenge heap once in a while
  3373  		if lastscavenge+scavengelimit/2 < now {
  3374  			mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
  3375  			lastscavenge = now
  3376  			nscavenge++
  3377  		}
  3378  		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now {
  3379  			lasttrace = now
  3380  			schedtrace(debug.scheddetail > 0)
  3381  		}
  3382  	}
  3383  }
  3384  
  3385  var pdesc [_MaxGomaxprocs]struct {
  3386  	schedtick   uint32
  3387  	schedwhen   int64
  3388  	syscalltick uint32
  3389  	syscallwhen int64
  3390  }
  3391  
  3392  // forcePreemptNS is the time slice given to a G before it is
  3393  // preempted.
  3394  const forcePreemptNS = 10 * 1000 * 1000 // 10ms
  3395  
  3396  func retake(now int64) uint32 {
  3397  	n := 0
  3398  	for i := int32(0); i < gomaxprocs; i++ {
  3399  		_p_ := allp[i]
  3400  		if _p_ == nil {
  3401  			continue
  3402  		}
  3403  		pd := &pdesc[i]
  3404  		s := _p_.status
  3405  		if s == _Psyscall {
  3406  			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  3407  			t := int64(_p_.syscalltick)
  3408  			if int64(pd.syscalltick) != t {
  3409  				pd.syscalltick = uint32(t)
  3410  				pd.syscallwhen = now
  3411  				continue
  3412  			}
  3413  			// On the one hand we don't want to retake Ps if there is no other work to do,
  3414  			// but on the other hand we want to retake them eventually
  3415  			// because they can prevent the sysmon thread from deep sleep.
  3416  			if runqempty(_p_) && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
  3417  				continue
  3418  			}
  3419  			// Need to decrement number of idle locked M's
  3420  			// (pretending that one more is running) before the CAS.
  3421  			// Otherwise the M from which we retake can exit the syscall,
  3422  			// increment nmidle and report deadlock.
  3423  			incidlelocked(-1)
  3424  			if cas(&_p_.status, s, _Pidle) {
  3425  				if trace.enabled {
  3426  					traceGoSysBlock(_p_)
  3427  					traceProcStop(_p_)
  3428  				}
  3429  				n++
  3430  				_p_.syscalltick++
  3431  				handoffp(_p_)
  3432  			}
  3433  			incidlelocked(1)
  3434  		} else if s == _Prunning {
  3435  			// Preempt G if it's running for too long.
  3436  			t := int64(_p_.schedtick)
  3437  			if int64(pd.schedtick) != t {
  3438  				pd.schedtick = uint32(t)
  3439  				pd.schedwhen = now
  3440  				continue
  3441  			}
  3442  			if pd.schedwhen+forcePreemptNS > now {
  3443  				continue
  3444  			}
  3445  			preemptone(_p_)
  3446  		}
  3447  	}
  3448  	return uint32(n)
  3449  }
  3450  
  3451  // Tell all goroutines that they have been preempted and they should stop.
  3452  // This function is purely best-effort.  It can fail to inform a goroutine if a
  3453  // processor just started running it.
  3454  // No locks need to be held.
  3455  // Returns true if preemption request was issued to at least one goroutine.
  3456  func preemptall() bool {
  3457  	res := false
  3458  	for i := int32(0); i < gomaxprocs; i++ {
  3459  		_p_ := allp[i]
  3460  		if _p_ == nil || _p_.status != _Prunning {
  3461  			continue
  3462  		}
  3463  		if preemptone(_p_) {
  3464  			res = true
  3465  		}
  3466  	}
  3467  	return res
  3468  }
  3469  
  3470  // Tell the goroutine running on processor P to stop.
  3471  // This function is purely best-effort.  It can incorrectly fail to inform the
  3472  // goroutine.  It can send inform the wrong goroutine.  Even if it informs the
  3473  // correct goroutine, that goroutine might ignore the request if it is
  3474  // simultaneously executing newstack.
  3475  // No lock needs to be held.
  3476  // Returns true if preemption request was issued.
  3477  // The actual preemption will happen at some point in the future
  3478  // and will be indicated by the gp->status no longer being
  3479  // Grunning
  3480  func preemptone(_p_ *p) bool {
  3481  	mp := _p_.m.ptr()
  3482  	if mp == nil || mp == getg().m {
  3483  		return false
  3484  	}
  3485  	gp := mp.curg
  3486  	if gp == nil || gp == mp.g0 {
  3487  		return false
  3488  	}
  3489  
  3490  	gp.preempt = true
  3491  
  3492  	// Every call in a go routine checks for stack overflow by
  3493  	// comparing the current stack pointer to gp->stackguard0.
  3494  	// Setting gp->stackguard0 to StackPreempt folds
  3495  	// preemption into the normal stack overflow check.
  3496  	gp.stackguard0 = stackPreempt
  3497  	return true
  3498  }
  3499  
  3500  var starttime int64
  3501  
  3502  func schedtrace(detailed bool) {
  3503  	now := nanotime()
  3504  	if starttime == 0 {
  3505  		starttime = now
  3506  	}
  3507  
  3508  	lock(&sched.lock)
  3509  	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
  3510  	if detailed {
  3511  		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
  3512  	}
  3513  	// We must be careful while reading data from P's, M's and G's.
  3514  	// Even if we hold schedlock, most data can be changed concurrently.
  3515  	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  3516  	for i := int32(0); i < gomaxprocs; i++ {
  3517  		_p_ := allp[i]
  3518  		if _p_ == nil {
  3519  			continue
  3520  		}
  3521  		mp := _p_.m.ptr()
  3522  		h := atomicload(&_p_.runqhead)
  3523  		t := atomicload(&_p_.runqtail)
  3524  		if detailed {
  3525  			id := int32(-1)
  3526  			if mp != nil {
  3527  				id = mp.id
  3528  			}
  3529  			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
  3530  		} else {
  3531  			// In non-detailed mode format lengths of per-P run queues as:
  3532  			// [len1 len2 len3 len4]
  3533  			print(" ")
  3534  			if i == 0 {
  3535  				print("[")
  3536  			}
  3537  			print(t - h)
  3538  			if i == gomaxprocs-1 {
  3539  				print("]\n")
  3540  			}
  3541  		}
  3542  	}
  3543  
  3544  	if !detailed {
  3545  		unlock(&sched.lock)
  3546  		return
  3547  	}
  3548  
  3549  	for mp := allm; mp != nil; mp = mp.alllink {
  3550  		_p_ := mp.p.ptr()
  3551  		gp := mp.curg
  3552  		lockedg := mp.lockedg
  3553  		id1 := int32(-1)
  3554  		if _p_ != nil {
  3555  			id1 = _p_.id
  3556  		}
  3557  		id2 := int64(-1)
  3558  		if gp != nil {
  3559  			id2 = gp.goid
  3560  		}
  3561  		id3 := int64(-1)
  3562  		if lockedg != nil {
  3563  			id3 = lockedg.goid
  3564  		}
  3565  		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
  3566  	}
  3567  
  3568  	lock(&allglock)
  3569  	for gi := 0; gi < len(allgs); gi++ {
  3570  		gp := allgs[gi]
  3571  		mp := gp.m
  3572  		lockedm := gp.lockedm
  3573  		id1 := int32(-1)
  3574  		if mp != nil {
  3575  			id1 = mp.id
  3576  		}
  3577  		id2 := int32(-1)
  3578  		if lockedm != nil {
  3579  			id2 = lockedm.id
  3580  		}
  3581  		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
  3582  	}
  3583  	unlock(&allglock)
  3584  	unlock(&sched.lock)
  3585  }
  3586  
  3587  // Put mp on midle list.
  3588  // Sched must be locked.
  3589  // May run during STW, so write barriers are not allowed.
  3590  //go:nowritebarrier
  3591  func mput(mp *m) {
  3592  	mp.schedlink = sched.midle
  3593  	sched.midle.set(mp)
  3594  	sched.nmidle++
  3595  	checkdead()
  3596  }
  3597  
  3598  // Try to get an m from midle list.
  3599  // Sched must be locked.
  3600  // May run during STW, so write barriers are not allowed.
  3601  //go:nowritebarrier
  3602  func mget() *m {
  3603  	mp := sched.midle.ptr()
  3604  	if mp != nil {
  3605  		sched.midle = mp.schedlink
  3606  		sched.nmidle--
  3607  	}
  3608  	return mp
  3609  }
  3610  
  3611  // Put gp on the global runnable queue.
  3612  // Sched must be locked.
  3613  // May run during STW, so write barriers are not allowed.
  3614  //go:nowritebarrier
  3615  func globrunqput(gp *g) {
  3616  	gp.schedlink = 0
  3617  	if sched.runqtail != 0 {
  3618  		sched.runqtail.ptr().schedlink.set(gp)
  3619  	} else {
  3620  		sched.runqhead.set(gp)
  3621  	}
  3622  	sched.runqtail.set(gp)
  3623  	sched.runqsize++
  3624  }
  3625  
  3626  // Put gp at the head of the global runnable queue.
  3627  // Sched must be locked.
  3628  // May run during STW, so write barriers are not allowed.
  3629  //go:nowritebarrier
  3630  func globrunqputhead(gp *g) {
  3631  	gp.schedlink = sched.runqhead
  3632  	sched.runqhead.set(gp)
  3633  	if sched.runqtail == 0 {
  3634  		sched.runqtail.set(gp)
  3635  	}
  3636  	sched.runqsize++
  3637  }
  3638  
  3639  // Put a batch of runnable goroutines on the global runnable queue.
  3640  // Sched must be locked.
  3641  func globrunqputbatch(ghead *g, gtail *g, n int32) {
  3642  	gtail.schedlink = 0
  3643  	if sched.runqtail != 0 {
  3644  		sched.runqtail.ptr().schedlink.set(ghead)
  3645  	} else {
  3646  		sched.runqhead.set(ghead)
  3647  	}
  3648  	sched.runqtail.set(gtail)
  3649  	sched.runqsize += n
  3650  }
  3651  
  3652  // Try get a batch of G's from the global runnable queue.
  3653  // Sched must be locked.
  3654  func globrunqget(_p_ *p, max int32) *g {
  3655  	if sched.runqsize == 0 {
  3656  		return nil
  3657  	}
  3658  
  3659  	n := sched.runqsize/gomaxprocs + 1
  3660  	if n > sched.runqsize {
  3661  		n = sched.runqsize
  3662  	}
  3663  	if max > 0 && n > max {
  3664  		n = max
  3665  	}
  3666  	if n > int32(len(_p_.runq))/2 {
  3667  		n = int32(len(_p_.runq)) / 2
  3668  	}
  3669  
  3670  	sched.runqsize -= n
  3671  	if sched.runqsize == 0 {
  3672  		sched.runqtail = 0
  3673  	}
  3674  
  3675  	gp := sched.runqhead.ptr()
  3676  	sched.runqhead = gp.schedlink
  3677  	n--
  3678  	for ; n > 0; n-- {
  3679  		gp1 := sched.runqhead.ptr()
  3680  		sched.runqhead = gp1.schedlink
  3681  		runqput(_p_, gp1, false)
  3682  	}
  3683  	return gp
  3684  }
  3685  
  3686  // Put p to on _Pidle list.
  3687  // Sched must be locked.
  3688  // May run during STW, so write barriers are not allowed.
  3689  //go:nowritebarrier
  3690  func pidleput(_p_ *p) {
  3691  	if !runqempty(_p_) {
  3692  		throw("pidleput: P has non-empty run queue")
  3693  	}
  3694  	_p_.link = sched.pidle
  3695  	sched.pidle.set(_p_)
  3696  	xadd(&sched.npidle, 1) // TODO: fast atomic
  3697  }
  3698  
  3699  // Try get a p from _Pidle list.
  3700  // Sched must be locked.
  3701  // May run during STW, so write barriers are not allowed.
  3702  //go:nowritebarrier
  3703  func pidleget() *p {
  3704  	_p_ := sched.pidle.ptr()
  3705  	if _p_ != nil {
  3706  		sched.pidle = _p_.link
  3707  		xadd(&sched.npidle, -1) // TODO: fast atomic
  3708  	}
  3709  	return _p_
  3710  }
  3711  
  3712  // runqempty returns true if _p_ has no Gs on its local run queue.
  3713  // Note that this test is generally racy.
  3714  func runqempty(_p_ *p) bool {
  3715  	return _p_.runqhead == _p_.runqtail && _p_.runnext == 0
  3716  }
  3717  
  3718  // To shake out latent assumptions about scheduling order,
  3719  // we introduce some randomness into scheduling decisions
  3720  // when running with the race detector.
  3721  // The need for this was made obvious by changing the
  3722  // (deterministic) scheduling order in Go 1.5 and breaking
  3723  // many poorly-written tests.
  3724  // With the randomness here, as long as the tests pass
  3725  // consistently with -race, they shouldn't have latent scheduling
  3726  // assumptions.
  3727  const randomizeScheduler = raceenabled
  3728  
  3729  // runqput tries to put g on the local runnable queue.
  3730  // If next if false, runqput adds g to the tail of the runnable queue.
  3731  // If next is true, runqput puts g in the _p_.runnext slot.
  3732  // If the run queue is full, runnext puts g on the global queue.
  3733  // Executed only by the owner P.
  3734  func runqput(_p_ *p, gp *g, next bool) {
  3735  	if randomizeScheduler && next && fastrand1()%2 == 0 {
  3736  		next = false
  3737  	}
  3738  
  3739  	if next {
  3740  	retryNext:
  3741  		oldnext := _p_.runnext
  3742  		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
  3743  			goto retryNext
  3744  		}
  3745  		if oldnext == 0 {
  3746  			return
  3747  		}
  3748  		// Kick the old runnext out to the regular run queue.
  3749  		gp = oldnext.ptr()
  3750  	}
  3751  
  3752  retry:
  3753  	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
  3754  	t := _p_.runqtail
  3755  	if t-h < uint32(len(_p_.runq)) {
  3756  		_p_.runq[t%uint32(len(_p_.runq))] = gp
  3757  		atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
  3758  		return
  3759  	}
  3760  	if runqputslow(_p_, gp, h, t) {
  3761  		return
  3762  	}
  3763  	// the queue is not full, now the put above must suceed
  3764  	goto retry
  3765  }
  3766  
  3767  // Put g and a batch of work from local runnable queue on global queue.
  3768  // Executed only by the owner P.
  3769  func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
  3770  	var batch [len(_p_.runq)/2 + 1]*g
  3771  
  3772  	// First, grab a batch from local queue.
  3773  	n := t - h
  3774  	n = n / 2
  3775  	if n != uint32(len(_p_.runq)/2) {
  3776  		throw("runqputslow: queue is not full")
  3777  	}
  3778  	for i := uint32(0); i < n; i++ {
  3779  		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
  3780  	}
  3781  	if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  3782  		return false
  3783  	}
  3784  	batch[n] = gp
  3785  
  3786  	if randomizeScheduler {
  3787  		for i := uint32(1); i <= n; i++ {
  3788  			j := fastrand1() % (i + 1)
  3789  			batch[i], batch[j] = batch[j], batch[i]
  3790  		}
  3791  	}
  3792  
  3793  	// Link the goroutines.
  3794  	for i := uint32(0); i < n; i++ {
  3795  		batch[i].schedlink.set(batch[i+1])
  3796  	}
  3797  
  3798  	// Now put the batch on global queue.
  3799  	lock(&sched.lock)
  3800  	globrunqputbatch(batch[0], batch[n], int32(n+1))
  3801  	unlock(&sched.lock)
  3802  	return true
  3803  }
  3804  
  3805  // Get g from local runnable queue.
  3806  // If inheritTime is true, gp should inherit the remaining time in the
  3807  // current time slice. Otherwise, it should start a new time slice.
  3808  // Executed only by the owner P.
  3809  func runqget(_p_ *p) (gp *g, inheritTime bool) {
  3810  	// If there's a runnext, it's the next G to run.
  3811  	for {
  3812  		next := _p_.runnext
  3813  		if next == 0 {
  3814  			break
  3815  		}
  3816  		if _p_.runnext.cas(next, 0) {
  3817  			return next.ptr(), true
  3818  		}
  3819  	}
  3820  
  3821  	for {
  3822  		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
  3823  		t := _p_.runqtail
  3824  		if t == h {
  3825  			return nil, false
  3826  		}
  3827  		gp := _p_.runq[h%uint32(len(_p_.runq))]
  3828  		if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
  3829  			return gp, false
  3830  		}
  3831  	}
  3832  }
  3833  
  3834  // Grabs a batch of goroutines from _p_'s runnable queue into batch.
  3835  // Batch is a ring buffer starting at batchHead.
  3836  // Returns number of grabbed goroutines.
  3837  // Can be executed by any P.
  3838  func runqgrab(_p_ *p, batch *[256]*g, batchHead uint32, stealRunNextG bool) uint32 {
  3839  	for {
  3840  		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
  3841  		t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
  3842  		n := t - h
  3843  		n = n - n/2
  3844  		if n == 0 {
  3845  			if stealRunNextG {
  3846  				// Try to steal from _p_.runnext.
  3847  				if next := _p_.runnext; next != 0 {
  3848  					// Sleep to ensure that _p_ isn't about to run the g we
  3849  					// are about to steal.
  3850  					// The important use case here is when the g running on _p_
  3851  					// ready()s another g and then almost immediately blocks.
  3852  					// Instead of stealing runnext in this window, back off
  3853  					// to give _p_ a chance to schedule runnext. This will avoid
  3854  					// thrashing gs between different Ps.
  3855  					usleep(100)
  3856  					if !_p_.runnext.cas(next, 0) {
  3857  						continue
  3858  					}
  3859  					batch[batchHead%uint32(len(batch))] = next.ptr()
  3860  					return 1
  3861  				}
  3862  			}
  3863  			return 0
  3864  		}
  3865  		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
  3866  			continue
  3867  		}
  3868  		for i := uint32(0); i < n; i++ {
  3869  			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
  3870  			batch[(batchHead+i)%uint32(len(batch))] = g
  3871  		}
  3872  		if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  3873  			return n
  3874  		}
  3875  	}
  3876  }
  3877  
  3878  // Steal half of elements from local runnable queue of p2
  3879  // and put onto local runnable queue of p.
  3880  // Returns one of the stolen elements (or nil if failed).
  3881  func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
  3882  	t := _p_.runqtail
  3883  	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
  3884  	if n == 0 {
  3885  		return nil
  3886  	}
  3887  	n--
  3888  	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))]
  3889  	if n == 0 {
  3890  		return gp
  3891  	}
  3892  	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
  3893  	if t-h+n >= uint32(len(_p_.runq)) {
  3894  		throw("runqsteal: runq overflow")
  3895  	}
  3896  	atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
  3897  	return gp
  3898  }
  3899  
  3900  func testSchedLocalQueue() {
  3901  	_p_ := new(p)
  3902  	gs := make([]g, len(_p_.runq))
  3903  	for i := 0; i < len(_p_.runq); i++ {
  3904  		if g, _ := runqget(_p_); g != nil {
  3905  			throw("runq is not empty initially")
  3906  		}
  3907  		for j := 0; j < i; j++ {
  3908  			runqput(_p_, &gs[i], false)
  3909  		}
  3910  		for j := 0; j < i; j++ {
  3911  			if g, _ := runqget(_p_); g != &gs[i] {
  3912  				print("bad element at iter ", i, "/", j, "\n")
  3913  				throw("bad element")
  3914  			}
  3915  		}
  3916  		if g, _ := runqget(_p_); g != nil {
  3917  			throw("runq is not empty afterwards")
  3918  		}
  3919  	}
  3920  }
  3921  
  3922  func testSchedLocalQueueSteal() {
  3923  	p1 := new(p)
  3924  	p2 := new(p)
  3925  	gs := make([]g, len(p1.runq))
  3926  	for i := 0; i < len(p1.runq); i++ {
  3927  		for j := 0; j < i; j++ {
  3928  			gs[j].sig = 0
  3929  			runqput(p1, &gs[j], false)
  3930  		}
  3931  		gp := runqsteal(p2, p1, true)
  3932  		s := 0
  3933  		if gp != nil {
  3934  			s++
  3935  			gp.sig++
  3936  		}
  3937  		for {
  3938  			gp, _ = runqget(p2)
  3939  			if gp == nil {
  3940  				break
  3941  			}
  3942  			s++
  3943  			gp.sig++
  3944  		}
  3945  		for {
  3946  			gp, _ = runqget(p1)
  3947  			if gp == nil {
  3948  				break
  3949  			}
  3950  			gp.sig++
  3951  		}
  3952  		for j := 0; j < i; j++ {
  3953  			if gs[j].sig != 1 {
  3954  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
  3955  				throw("bad element")
  3956  			}
  3957  		}
  3958  		if s != i/2 && s != i/2+1 {
  3959  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
  3960  			throw("bad steal")
  3961  		}
  3962  	}
  3963  }
  3964  
  3965  //go:linkname setMaxThreads runtime/debug.setMaxThreads
  3966  func setMaxThreads(in int) (out int) {
  3967  	lock(&sched.lock)
  3968  	out = int(sched.maxmcount)
  3969  	sched.maxmcount = int32(in)
  3970  	checkmcount()
  3971  	unlock(&sched.lock)
  3972  	return
  3973  }
  3974  
  3975  func haveexperiment(name string) bool {
  3976  	x := goexperiment
  3977  	for x != "" {
  3978  		xname := ""
  3979  		i := index(x, ",")
  3980  		if i < 0 {
  3981  			xname, x = x, ""
  3982  		} else {
  3983  			xname, x = x[:i], x[i+1:]
  3984  		}
  3985  		if xname == name {
  3986  			return true
  3987  		}
  3988  	}
  3989  	return false
  3990  }
  3991  
  3992  //go:nosplit
  3993  func procPin() int {
  3994  	_g_ := getg()
  3995  	mp := _g_.m
  3996  
  3997  	mp.locks++
  3998  	return int(mp.p.ptr().id)
  3999  }
  4000  
  4001  //go:nosplit
  4002  func procUnpin() {
  4003  	_g_ := getg()
  4004  	_g_.m.locks--
  4005  }
  4006  
  4007  //go:linkname sync_runtime_procPin sync.runtime_procPin
  4008  //go:nosplit
  4009  func sync_runtime_procPin() int {
  4010  	return procPin()
  4011  }
  4012  
  4013  //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
  4014  //go:nosplit
  4015  func sync_runtime_procUnpin() {
  4016  	procUnpin()
  4017  }
  4018  
  4019  //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
  4020  //go:nosplit
  4021  func sync_atomic_runtime_procPin() int {
  4022  	return procPin()
  4023  }
  4024  
  4025  //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
  4026  //go:nosplit
  4027  func sync_atomic_runtime_procUnpin() {
  4028  	procUnpin()
  4029  }
  4030  
  4031  // Active spinning for sync.Mutex.
  4032  //go:linkname sync_runtime_canSpin sync.runtime_canSpin
  4033  //go:nosplit
  4034  func sync_runtime_canSpin(i int) bool {
  4035  	// sync.Mutex is cooperative, so we are conservative with spinning.
  4036  	// Spin only few times and only if running on a multicore machine and
  4037  	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
  4038  	// As opposed to runtime mutex we don't do passive spinning here,
  4039  	// because there can be work on global runq on on other Ps.
  4040  	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
  4041  		return false
  4042  	}
  4043  	if p := getg().m.p.ptr(); !runqempty(p) {
  4044  		return false
  4045  	}
  4046  	return true
  4047  }
  4048  
  4049  //go:linkname sync_runtime_doSpin sync.runtime_doSpin
  4050  //go:nosplit
  4051  func sync_runtime_doSpin() {
  4052  	procyield(active_spin_cnt)
  4053  }