github.com/reiver/go@v0.0.0-20150109200633-1d0c7792f172/src/runtime/proc1.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
     9  var (
    10  	m0 m
    11  	g0 g
    12  )
    13  
    14  // Goroutine scheduler
    15  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    16  //
    17  // The main concepts are:
    18  // G - goroutine.
    19  // M - worker thread, or machine.
    20  // P - processor, a resource that is required to execute Go code.
    21  //     M must have an associated P to execute Go code, however it can be
    22  //     blocked or in a syscall w/o an associated P.
    23  //
    24  // Design doc at http://golang.org/s/go11sched.
    25  
    26  const (
    27  	// Number of goroutine ids to grab from sched.goidgen to the local per-P cache at once.
    28  	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
    29  	_GoidCacheBatch = 16
    30  )
    31  
    32  /*
    33  SchedT	sched;
    34  int32	gomaxprocs;
    35  uint32	needextram;
    36  bool	iscgo;
    37  M	m0;
    38  G	g0;	// idle goroutine for m0
    39  G*	lastg;
    40  M*	allm;
    41  M*	extram;
    42  P*	allp[MaxGomaxprocs+1];
    43  int8*	goos;
    44  int32	ncpu;
    45  int32	newprocs;
    46  
    47  Mutex allglock;	// the following vars are protected by this lock or by stoptheworld
    48  G**	allg;
    49  Slice	allgs;
    50  uintptr allglen;
    51  ForceGCState	forcegc;
    52  
    53  void mstart(void);
    54  static void runqput(P*, G*);
    55  static G* runqget(P*);
    56  static bool runqputslow(P*, G*, uint32, uint32);
    57  static G* runqsteal(P*, P*);
    58  static void mput(M*);
    59  static M* mget(void);
    60  static void mcommoninit(M*);
    61  static void schedule(void);
    62  static void procresize(int32);
    63  static void acquirep(P*);
    64  static P* releasep(void);
    65  static void newm(void(*)(void), P*);
    66  static void stopm(void);
    67  static void startm(P*, bool);
    68  static void handoffp(P*);
    69  static void wakep(void);
    70  static void stoplockedm(void);
    71  static void startlockedm(G*);
    72  static void sysmon(void);
    73  static uint32 retake(int64);
    74  static void incidlelocked(int32);
    75  static void checkdead(void);
    76  static void exitsyscall0(G*);
    77  void park_m(G*);
    78  static void goexit0(G*);
    79  static void gfput(P*, G*);
    80  static G* gfget(P*);
    81  static void gfpurge(P*);
    82  static void globrunqput(G*);
    83  static void globrunqputbatch(G*, G*, int32);
    84  static G* globrunqget(P*, int32);
    85  static P* pidleget(void);
    86  static void pidleput(P*);
    87  static void injectglist(G*);
    88  static bool preemptall(void);
    89  static bool preemptone(P*);
    90  static bool exitsyscallfast(void);
    91  static bool haveexperiment(int8*);
    92  void allgadd(G*);
    93  static void dropg(void);
    94  
    95  extern String buildVersion;
    96  */
    97  
    98  // The bootstrap sequence is:
    99  //
   100  //	call osinit
   101  //	call schedinit
   102  //	make & queue new G
   103  //	call runtime·mstart
   104  //
   105  // The new G calls runtime·main.
   106  func schedinit() {
   107  	// raceinit must be the first call to race detector.
   108  	// In particular, it must be done before mallocinit below calls racemapshadow.
   109  	_g_ := getg()
   110  	if raceenabled {
   111  		_g_.racectx = raceinit()
   112  	}
   113  
   114  	sched.maxmcount = 10000
   115  
   116  	tracebackinit()
   117  	symtabinit()
   118  	stackinit()
   119  	mallocinit()
   120  	mcommoninit(_g_.m)
   121  
   122  	goargs()
   123  	goenvs()
   124  	parsedebugvars()
   125  	wbshadowinit()
   126  	gcinit()
   127  
   128  	sched.lastpoll = uint64(nanotime())
   129  	procs := 1
   130  	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
   131  		if n > _MaxGomaxprocs {
   132  			n = _MaxGomaxprocs
   133  		}
   134  		procs = n
   135  	}
   136  	if procresize(int32(procs)) != nil {
   137  		throw("unknown runnable goroutine during bootstrap")
   138  	}
   139  
   140  	if buildVersion == "" {
   141  		// Condition should never trigger.  This code just serves
   142  		// to ensure runtime·buildVersion is kept in the resulting binary.
   143  		buildVersion = "unknown"
   144  	}
   145  }
   146  
   147  func newsysmon() {
   148  	_newm(sysmon, nil)
   149  }
   150  
   151  func dumpgstatus(gp *g) {
   152  	_g_ := getg()
   153  	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   154  	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
   155  }
   156  
   157  func checkmcount() {
   158  	// sched lock is held
   159  	if sched.mcount > sched.maxmcount {
   160  		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
   161  		throw("thread exhaustion")
   162  	}
   163  }
   164  
   165  func mcommoninit(mp *m) {
   166  	_g_ := getg()
   167  
   168  	// g0 stack won't make sense for user (and is not necessarily unwindable).
   169  	if _g_ != _g_.m.g0 {
   170  		callers(1, &mp.createstack[0], len(mp.createstack))
   171  	}
   172  
   173  	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
   174  	if mp.fastrand == 0 {
   175  		mp.fastrand = 0x49f6428a
   176  	}
   177  
   178  	lock(&sched.lock)
   179  	mp.id = sched.mcount
   180  	sched.mcount++
   181  	checkmcount()
   182  	mpreinit(mp)
   183  	if mp.gsignal != nil {
   184  		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
   185  	}
   186  
   187  	// Add to allm so garbage collector doesn't free g->m
   188  	// when it is just in a register or thread-local storage.
   189  	mp.alllink = allm
   190  
   191  	// NumCgoCall() iterates over allm w/o schedlock,
   192  	// so we need to publish it safely.
   193  	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
   194  	unlock(&sched.lock)
   195  }
   196  
   197  // Mark gp ready to run.
   198  func ready(gp *g) {
   199  	status := readgstatus(gp)
   200  
   201  	// Mark runnable.
   202  	_g_ := getg()
   203  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
   204  	if status&^_Gscan != _Gwaiting {
   205  		dumpgstatus(gp)
   206  		throw("bad g->status in ready")
   207  	}
   208  
   209  	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   210  	casgstatus(gp, _Gwaiting, _Grunnable)
   211  	runqput(_g_.m.p, gp)
   212  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 { // TODO: fast atomic
   213  		wakep()
   214  	}
   215  	_g_.m.locks--
   216  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   217  		_g_.stackguard0 = stackPreempt
   218  	}
   219  }
   220  
   221  func gcprocs() int32 {
   222  	// Figure out how many CPUs to use during GC.
   223  	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
   224  	lock(&sched.lock)
   225  	n := gomaxprocs
   226  	if n > ncpu {
   227  		n = ncpu
   228  	}
   229  	if n > _MaxGcproc {
   230  		n = _MaxGcproc
   231  	}
   232  	if n > sched.nmidle+1 { // one M is currently running
   233  		n = sched.nmidle + 1
   234  	}
   235  	unlock(&sched.lock)
   236  	return n
   237  }
   238  
   239  func needaddgcproc() bool {
   240  	lock(&sched.lock)
   241  	n := gomaxprocs
   242  	if n > ncpu {
   243  		n = ncpu
   244  	}
   245  	if n > _MaxGcproc {
   246  		n = _MaxGcproc
   247  	}
   248  	n -= sched.nmidle + 1 // one M is currently running
   249  	unlock(&sched.lock)
   250  	return n > 0
   251  }
   252  
   253  func helpgc(nproc int32) {
   254  	_g_ := getg()
   255  	lock(&sched.lock)
   256  	pos := 0
   257  	for n := int32(1); n < nproc; n++ { // one M is currently running
   258  		if allp[pos].mcache == _g_.m.mcache {
   259  			pos++
   260  		}
   261  		mp := mget()
   262  		if mp == nil {
   263  			throw("gcprocs inconsistency")
   264  		}
   265  		mp.helpgc = n
   266  		mp.mcache = allp[pos].mcache
   267  		pos++
   268  		notewakeup(&mp.park)
   269  	}
   270  	unlock(&sched.lock)
   271  }
   272  
   273  // Similar to stoptheworld but best-effort and can be called several times.
   274  // There is no reverse operation; it is used during crashing.
   275  // This function must not lock any mutexes.
   276  func freezetheworld() {
   277  	if gomaxprocs == 1 {
   278  		return
   279  	}
   280  	// stopwait and preemption requests can be lost
   281  	// due to races with concurrently executing threads,
   282  	// so try several times
   283  	for i := 0; i < 5; i++ {
   284  		// this should tell the scheduler to not start any new goroutines
   285  		sched.stopwait = 0x7fffffff
   286  		atomicstore(&sched.gcwaiting, 1)
   287  		// this should stop running goroutines
   288  		if !preemptall() {
   289  			break // no running goroutines
   290  		}
   291  		usleep(1000)
   292  	}
   293  	// to be sure
   294  	usleep(1000)
   295  	preemptall()
   296  	usleep(1000)
   297  }
   298  
   299  func isscanstatus(status uint32) bool {
   300  	if status == _Gscan {
   301  		throw("isscanstatus: Bad status Gscan")
   302  	}
   303  	return status&_Gscan == _Gscan
   304  }
   305  
   306  // All reads and writes of g's status go through readgstatus, casgstatus,
   307  // castogscanstatus, and casfrom_Gscanstatus.
   308  //go:nosplit
   309  func readgstatus(gp *g) uint32 {
   310  	return atomicload(&gp.atomicstatus)
   311  }
   312  
   313  // The Gscanstatuses are acting like locks and this releases them.
   314  // If it proves to be a performance hit we should be able to make these
   315  // simple atomic stores but for now we are going to throw if
   316  // we see an inconsistent state.
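        // (A Gscan status is the corresponding base status with the _Gscan bit OR'd in,
        // so clearing that bit recovers the plain status; see the oldval&^_Gscan uses below.)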
   317  func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
   318  	success := false
   319  
   320  	// Check that transition is valid.
   321  	switch oldval {
   322  	default:
   323  		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   324  		dumpgstatus(gp)
   325  		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
   326  	case _Gscanrunnable,
   327  		_Gscanwaiting,
   328  		_Gscanrunning,
   329  		_Gscansyscall:
   330  		if newval == oldval&^_Gscan {
   331  			success = cas(&gp.atomicstatus, oldval, newval)
   332  		}
   333  	case _Gscanenqueue:
   334  		if newval == _Gwaiting {
   335  			success = cas(&gp.atomicstatus, oldval, newval)
   336  		}
   337  	}
   338  	if !success {
   339  		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   340  		dumpgstatus(gp)
   341  		throw("casfrom_Gscanstatus: gp->status is not in scan state")
   342  	}
   343  }
   344  
   345  // This will return false if the gp is not in the expected status and the cas fails.
   346  // This acts like a lock acquire, while casfrom_Gscanstatus acts like a lock release.
   347  func castogscanstatus(gp *g, oldval, newval uint32) bool {
   348  	switch oldval {
   349  	case _Grunnable,
   350  		_Gwaiting,
   351  		_Gsyscall:
   352  		if newval == oldval|_Gscan {
   353  			return cas(&gp.atomicstatus, oldval, newval)
   354  		}
   355  	case _Grunning:
   356  		if newval == _Gscanrunning || newval == _Gscanenqueue {
   357  			return cas(&gp.atomicstatus, oldval, newval)
   358  		}
   359  	}
   360  	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
   361  	throw("castogscanstatus")
   362  	panic("not reached")
   363  }
   364  
   365  // If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
   366  // and casfrom_Gscanstatus instead.
   367  // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
   368  // put it in the Gscan state is finished.
   369  //go:nosplit
   370  func casgstatus(gp *g, oldval, newval uint32) {
   371  	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
   372  		systemstack(func() {
   373  			print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
   374  			throw("casgstatus: bad incoming values")
   375  		})
   376  	}
   377  
   378  	// loop if gp->atomicstatus is in a scan state giving
   379  	// GC time to finish and change the state to oldval.
   380  	for !cas(&gp.atomicstatus, oldval, newval) {
   381  		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
   382  			systemstack(func() {
   383  				throw("casgstatus: waiting for Gwaiting but is Grunnable")
   384  			})
   385  		}
   386  		// Help GC if needed.
   387  		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
   388  		// 	gp.preemptscan = false
   389  		// 	systemstack(func() {
   390  		// 		gcphasework(gp)
   391  		// 	})
   392  		// }
   393  	}
   394  }
   395  
   396  // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
   397  // Returns old status. Cannot call casgstatus directly, because we are racing with an
   398  // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
   399  // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
   400  // it would loop waiting for the status to go back to Gwaiting, which it never will.
   401  //go:nosplit
   402  func casgcopystack(gp *g) uint32 {
   403  	for {
   404  		oldstatus := readgstatus(gp) &^ _Gscan
   405  		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
   406  			throw("copystack: bad status, not Gwaiting or Grunnable")
   407  		}
   408  		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
   409  			return oldstatus
   410  		}
   411  	}
   412  }
   413  
   414  // stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
   415  // or, in the context of a moving collector, the pointers can be flipped from pointing
   416  // to old objects to pointing to new objects.
   417  // If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
   418  // the caller calls restartg.
   419  // If stopg returns false, the caller is not responsible for calling restartg. This can happen
   420  // if another thread, either the gp itself or another GC thread, is taking responsibility
   421  // for the GC work related to this thread.
   422  func stopg(gp *g) bool {
   423  	for {
   424  		if gp.gcworkdone {
   425  			return false
   426  		}
   427  
   428  		switch s := readgstatus(gp); s {
   429  		default:
   430  			dumpgstatus(gp)
   431  			throw("stopg: gp->atomicstatus is not valid")
   432  
   433  		case _Gdead:
   434  			return false
   435  
   436  		case _Gcopystack:
   437  			// Loop until a new stack is in place.
   438  
   439  		case _Grunnable,
   440  			_Gsyscall,
   441  			_Gwaiting:
   442  			// Claim goroutine by setting scan bit.
   443  			if !castogscanstatus(gp, s, s|_Gscan) {
   444  				break
   445  			}
   446  			// In scan state, do work.
   447  			gcphasework(gp)
   448  			return true
   449  
   450  		case _Gscanrunnable,
   451  			_Gscanwaiting,
   452  			_Gscansyscall:
   453  			// Goroutine already claimed by another GC helper.
   454  			return false
   455  
   456  		case _Grunning:
   457  			// Claim goroutine, so we aren't racing with a status
   458  			// transition away from Grunning.
   459  			if !castogscanstatus(gp, _Grunning, _Gscanrunning) {
   460  				break
   461  			}
   462  
   463  			// Mark gp for preemption.
   464  			if !gp.gcworkdone {
   465  				gp.preemptscan = true
   466  				gp.preempt = true
   467  				gp.stackguard0 = stackPreempt
   468  			}
   469  
   470  			// Unclaim.
   471  			casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
   472  			return false
   473  		}
   474  	}
   475  }
   476  
   477  // The GC requests that this routine be moved from a scanmumble state to a mumble state.
   478  func restartg(gp *g) {
   479  	s := readgstatus(gp)
   480  	switch s {
   481  	default:
   482  		dumpgstatus(gp)
   483  		throw("restartg: unexpected status")
   484  
   485  	case _Gdead:
   486  		// ok
   487  
   488  	case _Gscanrunnable,
   489  		_Gscanwaiting,
   490  		_Gscansyscall:
   491  		casfrom_Gscanstatus(gp, s, s&^_Gscan)
   492  
   493  	// Scan is now completed.
   494  	// Goroutine now needs to be made runnable.
   495  	// We put it on the global run queue; ready blocks on the global scheduler lock.
   496  	case _Gscanenqueue:
   497  		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
   498  		if gp != getg().m.curg {
   499  			throw("processing Gscanenqueue on wrong m")
   500  		}
   501  		dropg()
   502  		ready(gp)
   503  	}
   504  }
   505  
   506  func stopscanstart(gp *g) {
   507  	_g_ := getg()
   508  	if _g_ == gp {
   509  		throw("GC not moved to G0")
   510  	}
   511  	if stopg(gp) {
   512  		if !isscanstatus(readgstatus(gp)) {
   513  			dumpgstatus(gp)
   514  			throw("GC not in scan state")
   515  		}
   516  		restartg(gp)
   517  	}
   518  }
   519  
   520  // Runs on g0 and does the actual work after putting the g back on the run queue.
   521  func mquiesce(gpmaster *g) {
   522  	// enqueue the calling goroutine.
   523  	restartg(gpmaster)
   524  
   525  	activeglen := len(allgs)
   526  	for i := 0; i < activeglen; i++ {
   527  		gp := allgs[i]
   528  		if readgstatus(gp) == _Gdead {
   529  			gp.gcworkdone = true // noop scan.
   530  		} else {
   531  			gp.gcworkdone = false
   532  		}
   533  		stopscanstart(gp)
   534  	}
   535  
   536  	// Check that the G's gcwork (such as scanning) has been done. If not, do it now.
   537  	// You can end up doing work here if the page trap on a Grunning goroutine has
   538  	// not been sprung or in some race situations. For example, a runnable goroutine goes dead
   539  	// and is started up again with gp->gcworkdone set to false.
   540  	for i := 0; i < activeglen; i++ {
   541  		gp := allgs[i]
   542  		for !gp.gcworkdone {
   543  			status := readgstatus(gp)
   544  			if status == _Gdead {
   545  				//do nothing, scan not needed.
   546  				gp.gcworkdone = true // scan is a noop
   547  				break
   548  			}
   549  			if status == _Grunning && gp.stackguard0 == uintptr(stackPreempt) && notetsleep(&sched.stopnote, 100*1000) { // nanosecond arg
   550  				noteclear(&sched.stopnote)
   551  			} else {
   552  				stopscanstart(gp)
   553  			}
   554  		}
   555  	}
   556  
   557  	for i := 0; i < activeglen; i++ {
   558  		gp := allgs[i]
   559  		status := readgstatus(gp)
   560  		if isscanstatus(status) {
   561  			print("mstopandscang:bottom: post scan bad status gp=", gp, " has status ", hex(status), "\n")
   562  			dumpgstatus(gp)
   563  		}
   564  		if !gp.gcworkdone && status != _Gdead {
   565  			print("mstopandscang:bottom: post scan gp=", gp, "->gcworkdone still false\n")
   566  			dumpgstatus(gp)
   567  		}
   568  	}
   569  
   570  	schedule() // Never returns.
   571  }
   572  
   573  // quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
   574  // If the global gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
   575  // have been scanned before it returns.
   576  func quiesce(mastergp *g) {
   577  	castogscanstatus(mastergp, _Grunning, _Gscanenqueue)
   578  	// Now move this to the g0 (aka m) stack.
   579  	// g0 will potentially scan this thread and put mastergp on the runqueue
   580  	mcall(mquiesce)
   581  }
   582  
   583  // This is used by the GC as well as the routines that do stack dumps. In the case
   584  // of GC all the routines can be reliably stopped. This is not always the case
   585  // when the system is in panic or being exited.
   586  func stoptheworld() {
   587  	_g_ := getg()
   588  
   589  	// If we hold a lock, then we won't be able to stop another M
   590  	// that is blocked trying to acquire the lock.
   591  	if _g_.m.locks > 0 {
   592  		throw("stoptheworld: holding locks")
   593  	}
   594  
   595  	lock(&sched.lock)
   596  	sched.stopwait = gomaxprocs
   597  	atomicstore(&sched.gcwaiting, 1)
   598  	preemptall()
   599  	// stop current P
   600  	_g_.m.p.status = _Pgcstop // Pgcstop is only diagnostic.
   601  	sched.stopwait--
   602  	// try to retake all P's in Psyscall status
   603  	for i := 0; i < int(gomaxprocs); i++ {
   604  		p := allp[i]
   605  		s := p.status
   606  		if s == _Psyscall && cas(&p.status, s, _Pgcstop) {
   607  			sched.stopwait--
   608  		}
   609  	}
   610  	// stop idle P's
   611  	for {
   612  		p := pidleget()
   613  		if p == nil {
   614  			break
   615  		}
   616  		p.status = _Pgcstop
   617  		sched.stopwait--
   618  	}
   619  	wait := sched.stopwait > 0
   620  	unlock(&sched.lock)
   621  
   622  	// wait for remaining P's to stop voluntarily
   623  	if wait {
   624  		for {
   625  			// wait for 100us, then try to re-preempt in case of any races
   626  			if notetsleep(&sched.stopnote, 100*1000) {
   627  				noteclear(&sched.stopnote)
   628  				break
   629  			}
   630  			preemptall()
   631  		}
   632  	}
   633  	if sched.stopwait != 0 {
   634  		throw("stoptheworld: not stopped")
   635  	}
   636  	for i := 0; i < int(gomaxprocs); i++ {
   637  		p := allp[i]
   638  		if p.status != _Pgcstop {
   639  			throw("stoptheworld: not stopped")
   640  		}
   641  	}
   642  }
   643  
   644  func mhelpgc() {
   645  	_g_ := getg()
   646  	_g_.m.helpgc = -1
   647  }
   648  
   649  func starttheworld() {
   650  	_g_ := getg()
   651  
   652  	_g_.m.locks++        // disable preemption because it can be holding p in a local var
   653  	gp := netpoll(false) // non-blocking
   654  	injectglist(gp)
   655  	add := needaddgcproc()
   656  	lock(&sched.lock)
   657  
   658  	procs := gomaxprocs
   659  	if newprocs != 0 {
   660  		procs = newprocs
   661  		newprocs = 0
   662  	}
   663  	p1 := procresize(procs)
   664  	sched.gcwaiting = 0
   665  	if sched.sysmonwait != 0 {
   666  		sched.sysmonwait = 0
   667  		notewakeup(&sched.sysmonnote)
   668  	}
   669  	unlock(&sched.lock)
   670  
   671  	for p1 != nil {
   672  		p := p1
   673  		p1 = p1.link
   674  		if p.m != nil {
   675  			mp := p.m
   676  			p.m = nil
   677  			if mp.nextp != nil {
   678  				throw("starttheworld: inconsistent mp->nextp")
   679  			}
   680  			mp.nextp = p
   681  			notewakeup(&mp.park)
   682  		} else {
   683  			// Start M to run P.  Do not start another M below.
   684  			_newm(nil, p)
   685  			add = false
   686  		}
   687  	}
   688  
   689  	// Wake up an additional proc in case we have excess runnable goroutines
   690  	// in local queues or in the global queue. If we don't, the proc will park itself.
   691  	// If we have lots of excess work, resetspinning will unpark additional procs as necessary.
   692  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 {
   693  		wakep()
   694  	}
   695  
   696  	if add {
   697  		// If GC could have used another helper proc, start one now,
   698  		// in the hope that it will be available next time.
   699  		// It would have been even better to start it before the collection,
   700  		// but doing so requires allocating memory, so it's tricky to
   701  		// coordinate.  This lazy approach works out in practice:
   702  		// we don't mind if the first couple gc rounds don't have quite
   703  		// the maximum number of procs.
   704  		_newm(mhelpgc, nil)
   705  	}
   706  	_g_.m.locks--
   707  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   708  		_g_.stackguard0 = stackPreempt
   709  	}
   710  }
   711  
   712  // Called to start an M.
   713  //go:nosplit
   714  func mstart() {
   715  	_g_ := getg()
   716  
   717  	if _g_.stack.lo == 0 {
   718  		// Initialize stack bounds from system stack.
   719  		// Cgo may have left stack size in stack.hi.
   720  		size := _g_.stack.hi
   721  		if size == 0 {
   722  			size = 8192
   723  		}
   724  		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
   725  		_g_.stack.lo = _g_.stack.hi - size + 1024
   726  	}
   727  	// Initialize stack guards so that we can start calling
   728  	// both Go and C functions with stack growth prologues.
   729  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
   730  	_g_.stackguard1 = _g_.stackguard0
   731  	mstart1()
   732  }
   733  
   734  func mstart1() {
   735  	_g_ := getg()
   736  
   737  	if _g_ != _g_.m.g0 {
   738  		throw("bad runtime·mstart")
   739  	}
   740  
   741  	// Record top of stack for use by mcall.
   742  	// Once we call schedule we're never coming back,
   743  	// so other calls can reuse this stack space.
   744  	gosave(&_g_.m.g0.sched)
   745  	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
   746  	asminit()
   747  	minit()
   748  
   749  	// Install signal handlers; after minit so that minit can
   750  	// prepare the thread to be able to handle the signals.
   751  	if _g_.m == &m0 {
   752  		initsig()
   753  	}
   754  
   755  	if _g_.m.mstartfn != nil {
   756  		fn := *(*func())(unsafe.Pointer(&_g_.m.mstartfn))
   757  		fn()
   758  	}
   759  
   760  	if _g_.m.helpgc != 0 {
   761  		_g_.m.helpgc = 0
   762  		stopm()
   763  	} else if _g_.m != &m0 {
   764  		acquirep(_g_.m.nextp)
   765  		_g_.m.nextp = nil
   766  	}
   767  	schedule()
   768  
   769  	// TODO(brainman): This point is never reached, because scheduler
   770  	// does not release os threads at the moment. But once this path
   771  	// is enabled, we must remove our seh here.
   772  }
   773  
   774  // When running with cgo, we call _cgo_thread_start
   775  // to start threads for us so that we can play nicely with
   776  // foreign code.
   777  var cgoThreadStart unsafe.Pointer
   778  
   779  type cgothreadstart struct {
   780  	g   *g
   781  	tls *uint64
   782  	fn  unsafe.Pointer
   783  }
   784  
   785  // Allocate a new m unassociated with any thread.
   786  // Can use p for allocation context if needed.
   787  func allocm(_p_ *p) *m {
   788  	_g_ := getg()
   789  	_g_.m.locks++ // disable GC because it can be called from sysmon
   790  	if _g_.m.p == nil {
   791  		acquirep(_p_) // temporarily borrow p for mallocs in this function
   792  	}
   793  	mp := newM()
   794  	mcommoninit(mp)
   795  
   796  	// In case of cgo or Solaris, pthread_create will make us a stack.
   797  	// Windows and Plan 9 will lay out the scheduling stack on the OS stack.
   798  	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
   799  		mp.g0 = malg(-1)
   800  	} else {
   801  		mp.g0 = malg(8192)
   802  	}
   803  	mp.g0.m = mp
   804  
   805  	if _p_ == _g_.m.p {
   806  		releasep()
   807  	}
   808  	_g_.m.locks--
   809  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
   810  		_g_.stackguard0 = stackPreempt
   811  	}
   812  
   813  	return mp
   814  }
   815  
   816  func allocg() *g {
   817  	return newG()
   818  }
   819  
   820  // needm is called when a cgo callback happens on a
   821  // thread without an m (a thread not created by Go).
   822  // In this case, needm is expected to find an m to use
   823  // and return with m, g initialized correctly.
   824  // Since m and g are not set now (likely nil, but see below)
   825  // needm is limited in what routines it can call. In particular
   826  // it can only call nosplit functions (textflag 7) and cannot
   827  // do any scheduling that requires an m.
   828  //
   829  // In order to avoid needing heavy lifting here, we adopt
   830  // the following strategy: there is a stack of available m's
   831  // that can be stolen. Using compare-and-swap
   832  // to pop from the stack has ABA races, so we simulate
   833  // a lock by doing an exchange (via casp) to steal the stack
   834  // head and replace the top pointer with MLOCKED (1).
   835  // This serves as a simple spin lock that we can use even
   836  // without an m. The thread that locks the stack in this way
   837  // unlocks the stack by storing a valid stack head pointer.
   838  //
   839  // In order to make sure that there is always an m structure
   840  // available to be stolen, we maintain the invariant that there
   841  // is always one more than needed. At the beginning of the
   842  // program (if cgo is in use) the list is seeded with a single m.
   843  // If needm finds that it has taken the last m off the list, its job
   844  // is - once it has installed its own m so that it can do things like
   845  // allocate memory - to create a spare m and put it on the list.
   846  //
   847  // Each of these extra m's also has a g0 and a curg that are
   848  // pressed into service as the scheduling stack and current
   849  // goroutine for the duration of the cgo callback.
   850  //
   851  // When the callback is done with the m, it calls dropm to
   852  // put the m back on the list.
   853  //go:nosplit
   854  func needm(x byte) {
   855  	if needextram != 0 {
   856  		// Can happen if C/C++ code calls Go from a global ctor.
   857  		// Can not throw, because scheduler is not initialized yet.
   858  		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
   859  		exit(1)
   860  	}
   861  
   862  	// Lock extra list, take head, unlock popped list.
   863  	// nilokay=false is safe here because of the invariant above,
   864  	// that the extra list always contains or will soon contain
   865  	// at least one m.
   866  	mp := lockextra(false)
   867  
   868  	// Set needextram when we've just emptied the list,
   869  	// so that the eventual call into cgocallbackg will
   870  	// allocate a new m for the extra list. We delay the
   871  	// allocation until then so that it can be done
   872  	// after exitsyscall makes sure it is okay to be
   873  	// running at all (that is, there's no garbage collection
   874  	// running right now).
   875  	mp.needextram = mp.schedlink == nil
   876  	unlockextra(mp.schedlink)
   877  
   878  	// Install g (= m->g0) and set the stack bounds
   879  	// to match the current stack. We don't actually know
   880  	// how big the stack is, like we don't know how big any
   881  	// scheduling stack is, but we assume there's at least 32 kB,
   882  	// which is more than enough for us.
   883  	setg(mp.g0)
   884  	_g_ := getg()
   885  	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
   886  	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
   887  	_g_.stackguard0 = _g_.stack.lo + _StackGuard
   888  
   889  	// Initialize this thread to use the m.
   890  	asminit()
   891  	minit()
   892  }
   893  
   894  var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
   895  
   896  // newextram allocates an m and puts it on the extra list.
   897  // It is called with a working local m, so that it can do things
   898  // like call schedlock and allocate.
   899  func newextram() {
   900  	// Create extra goroutine locked to extra m.
   901  	// The goroutine is the context in which the cgo callback will run.
   902  	// The sched.pc will never be returned to, but setting it to
   903  	// goexit makes clear to the traceback routines where
   904  	// the goroutine stack ends.
   905  	mp := allocm(nil)
   906  	gp := malg(4096)
   907  	gp.sched.pc = funcPC(goexit) + _PCQuantum
   908  	gp.sched.sp = gp.stack.hi
   909  	gp.sched.sp -= 4 * regSize // extra space in case of reads slightly beyond frame
   910  	gp.sched.lr = 0
   911  	gp.sched.g = guintptr(unsafe.Pointer(gp))
   912  	gp.syscallpc = gp.sched.pc
   913  	gp.syscallsp = gp.sched.sp
   914  	// malg returns status as Gidle, change to Gsyscall before adding to allg
   915  	// where GC will see it.
   916  	casgstatus(gp, _Gidle, _Gsyscall)
   917  	gp.m = mp
   918  	mp.curg = gp
   919  	mp.locked = _LockInternal
   920  	mp.lockedg = gp
   921  	gp.lockedm = mp
   922  	gp.goid = int64(xadd64(&sched.goidgen, 1))
   923  	if raceenabled {
   924  		gp.racectx = racegostart(funcPC(newextram))
   925  	}
   926  	// put on allg for garbage collector
   927  	allgadd(gp)
   928  
   929  	// Add m to the extra list.
   930  	mnext := lockextra(true)
   931  	mp.schedlink = mnext
   932  	unlockextra(mp)
   933  }
   934  
   935  // dropm is called when a cgo callback has called needm but is now
   936  // done with the callback and returning into the non-Go thread.
   937  // It puts the current m back onto the extra list.
   938  //
   939  // The main expense here is the call to signalstack to release the
   940  // m's signal stack, and then the call to needm on the next callback
   941  // from this thread. It is tempting to try to save the m for next time,
   942  // which would eliminate both these costs, but there might not be
   943  // a next time: the current thread (which Go does not control) might exit.
   944  // If we saved the m for that thread, there would be an m leak each time
   945  // such a thread exited. Instead, we acquire and release an m on each
   946  // call. These should typically not be scheduling operations, just a few
   947  // atomics, so the cost should be small.
   948  //
   949  // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
   950  // variable using pthread_key_create. Unlike the pthread keys we already use
   951  // on OS X, this dummy key would never be read by Go code. It would exist
   952  // only so that we could register a thread-exit-time destructor.
   953  // That destructor would put the m back onto the extra list.
   954  // This is purely a performance optimization. The current version,
   955  // in which dropm happens on each cgo call, is still correct too.
   956  // We may have to keep the current version on systems with cgo
   957  // but without pthreads, like Windows.
   958  func dropm() {
   959  	// Undo whatever initialization minit did during needm.
   960  	unminit()
   961  
   962  	// Clear m and g, and return m to the extra list.
   963  	// After the call to setg we can only call nosplit functions
   964  	// with no pointer manipulation.
   965  	mp := getg().m
   966  	mnext := lockextra(true)
   967  	mp.schedlink = mnext
   968  
   969  	setg(nil)
   970  	unlockextra(mp)
   971  }
   972  
   973  var extram uintptr
   974  
   975  // lockextra locks the extra list and returns the list head.
   976  // The caller must unlock the list by storing a new list head
   977  // to extram. If nilokay is true, then lockextra will
   978  // return a nil list head if that's what it finds. If nilokay is false,
   979  // lockextra will keep waiting until the list head is no longer nil.
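        // The value 1 is never a valid *m pointer, so extram == 1 marks the list as locked
        // (the MLOCKED sentinel described in the needm comment above).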
   980  //go:nosplit
   981  func lockextra(nilokay bool) *m {
   982  	const locked = 1
   983  
   984  	for {
   985  		old := atomicloaduintptr(&extram)
   986  		if old == locked {
   987  			yield := osyield
   988  			yield()
   989  			continue
   990  		}
   991  		if old == 0 && !nilokay {
   992  			usleep(1)
   993  			continue
   994  		}
   995  		if casuintptr(&extram, old, locked) {
   996  			return (*m)(unsafe.Pointer(old))
   997  		}
   998  		yield := osyield
   999  		yield()
  1000  		continue
  1001  	}
  1002  }
  1003  
  1004  //go:nosplit
  1005  func unlockextra(mp *m) {
  1006  	atomicstoreuintptr(&extram, uintptr(unsafe.Pointer(mp)))
  1007  }
  1008  
  1009  // Create a new m.  It will start off with a call to fn, or else the scheduler.
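        // fn is stored in mp.mstartfn as an unsafe.Pointer and converted back to a func()
        // in mstart1 before being called.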
  1010  func _newm(fn func(), _p_ *p) {
  1011  	mp := allocm(_p_)
  1012  	mp.nextp = _p_
  1013  	mp.mstartfn = *(*unsafe.Pointer)(unsafe.Pointer(&fn))
  1014  
  1015  	if iscgo {
  1016  		var ts cgothreadstart
  1017  		if _cgo_thread_start == nil {
  1018  			throw("_cgo_thread_start missing")
  1019  		}
  1020  		ts.g = mp.g0
  1021  		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
  1022  		ts.fn = unsafe.Pointer(funcPC(mstart))
  1023  		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
  1024  		return
  1025  	}
  1026  	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
  1027  }
  1028  
  1029  // Stops execution of the current m until new work is available.
  1030  // Returns with acquired P.
  1031  func stopm() {
  1032  	_g_ := getg()
  1033  
  1034  	if _g_.m.locks != 0 {
  1035  		throw("stopm holding locks")
  1036  	}
  1037  	if _g_.m.p != nil {
  1038  		throw("stopm holding p")
  1039  	}
  1040  	if _g_.m.spinning {
  1041  		_g_.m.spinning = false
  1042  		xadd(&sched.nmspinning, -1)
  1043  	}
  1044  
  1045  retry:
  1046  	lock(&sched.lock)
  1047  	mput(_g_.m)
  1048  	unlock(&sched.lock)
  1049  	notesleep(&_g_.m.park)
  1050  	noteclear(&_g_.m.park)
  1051  	if _g_.m.helpgc != 0 {
  1052  		gchelper()
  1053  		_g_.m.helpgc = 0
  1054  		_g_.m.mcache = nil
  1055  		goto retry
  1056  	}
  1057  	acquirep(_g_.m.nextp)
  1058  	_g_.m.nextp = nil
  1059  }
  1060  
  1061  func mspinning() {
  1062  	getg().m.spinning = true
  1063  }
  1064  
  1065  // Schedules some M to run the p (creates an M if necessary).
  1066  // If p==nil, tries to get an idle P; if there are no idle P's, it does nothing.
  1067  func startm(_p_ *p, spinning bool) {
  1068  	lock(&sched.lock)
  1069  	if _p_ == nil {
  1070  		_p_ = pidleget()
  1071  		if _p_ == nil {
  1072  			unlock(&sched.lock)
  1073  			if spinning {
  1074  				xadd(&sched.nmspinning, -1)
  1075  			}
  1076  			return
  1077  		}
  1078  	}
  1079  	mp := mget()
  1080  	unlock(&sched.lock)
  1081  	if mp == nil {
  1082  		var fn func()
  1083  		if spinning {
  1084  			fn = mspinning
  1085  		}
  1086  		_newm(fn, _p_)
  1087  		return
  1088  	}
  1089  	if mp.spinning {
  1090  		throw("startm: m is spinning")
  1091  	}
  1092  	if mp.nextp != nil {
  1093  		throw("startm: m has p")
  1094  	}
  1095  	mp.spinning = spinning
  1096  	mp.nextp = _p_
  1097  	notewakeup(&mp.park)
  1098  }
  1099  
  1100  // Hands off P from syscall or locked M.
  1101  func handoffp(_p_ *p) {
  1102  	// if it has local work, start it straight away
  1103  	if _p_.runqhead != _p_.runqtail || sched.runqsize != 0 {
  1104  		startm(_p_, false)
  1105  		return
  1106  	}
  1107  	// no local work, check that there are no spinning/idle M's,
  1108  	// otherwise our help is not required
  1109  	if atomicload(&sched.nmspinning)+atomicload(&sched.npidle) == 0 && cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
  1110  		startm(_p_, true)
  1111  		return
  1112  	}
  1113  	lock(&sched.lock)
  1114  	if sched.gcwaiting != 0 {
  1115  		_p_.status = _Pgcstop
  1116  		sched.stopwait--
  1117  		if sched.stopwait == 0 {
  1118  			notewakeup(&sched.stopnote)
  1119  		}
  1120  		unlock(&sched.lock)
  1121  		return
  1122  	}
  1123  	if sched.runqsize != 0 {
  1124  		unlock(&sched.lock)
  1125  		startm(_p_, false)
  1126  		return
  1127  	}
  1128  	// If this is the last running P and nobody is polling the network,
  1129  	// we need to wake up another M to poll the network.
  1130  	if sched.npidle == uint32(gomaxprocs-1) && atomicload64(&sched.lastpoll) != 0 {
  1131  		unlock(&sched.lock)
  1132  		startm(_p_, false)
  1133  		return
  1134  	}
  1135  	pidleput(_p_)
  1136  	unlock(&sched.lock)
  1137  }
  1138  
  1139  // Tries to add one more P to execute G's.
  1140  // Called when a G is made runnable (newproc, ready).
  1141  func wakep() {
  1142  	// be conservative about spinning threads
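        	// Only start a new spinning M if there are currently none;
        	// startm will undo the increment if it finds no idle P.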
  1143  	if !cas(&sched.nmspinning, 0, 1) {
  1144  		return
  1145  	}
  1146  	startm(nil, true)
  1147  }
  1148  
  1149  // Stops execution of the current m that is locked to a g until the g is runnable again.
  1150  // Returns with acquired P.
  1151  func stoplockedm() {
  1152  	_g_ := getg()
  1153  
  1154  	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
  1155  		throw("stoplockedm: inconsistent locking")
  1156  	}
  1157  	if _g_.m.p != nil {
  1158  		// Schedule another M to run this p.
  1159  		_p_ := releasep()
  1160  		handoffp(_p_)
  1161  	}
  1162  	incidlelocked(1)
  1163  	// Wait until another thread schedules lockedg again.
  1164  	notesleep(&_g_.m.park)
  1165  	noteclear(&_g_.m.park)
  1166  	status := readgstatus(_g_.m.lockedg)
  1167  	if status&^_Gscan != _Grunnable {
  1168  		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
  1169  		dumpgstatus(_g_)
  1170  		throw("stoplockedm: not runnable")
  1171  	}
  1172  	acquirep(_g_.m.nextp)
  1173  	_g_.m.nextp = nil
  1174  }
  1175  
  1176  // Schedules the locked m to run the locked gp.
  1177  func startlockedm(gp *g) {
  1178  	_g_ := getg()
  1179  
  1180  	mp := gp.lockedm
  1181  	if mp == _g_.m {
  1182  		throw("startlockedm: locked to me")
  1183  	}
  1184  	if mp.nextp != nil {
  1185  		throw("startlockedm: m has p")
  1186  	}
  1187  	// directly handoff current P to the locked m
  1188  	incidlelocked(-1)
  1189  	_p_ := releasep()
  1190  	mp.nextp = _p_
  1191  	notewakeup(&mp.park)
  1192  	stopm()
  1193  }
  1194  
  1195  // Stops the current m for stoptheworld.
  1196  // Returns when the world is restarted.
  1197  func gcstopm() {
  1198  	_g_ := getg()
  1199  
  1200  	if sched.gcwaiting == 0 {
  1201  		throw("gcstopm: not waiting for gc")
  1202  	}
  1203  	if _g_.m.spinning {
  1204  		_g_.m.spinning = false
  1205  		xadd(&sched.nmspinning, -1)
  1206  	}
  1207  	_p_ := releasep()
  1208  	lock(&sched.lock)
  1209  	_p_.status = _Pgcstop
  1210  	sched.stopwait--
  1211  	if sched.stopwait == 0 {
  1212  		notewakeup(&sched.stopnote)
  1213  	}
  1214  	unlock(&sched.lock)
  1215  	stopm()
  1216  }
  1217  
  1218  // Schedules gp to run on the current M.
  1219  // Never returns.
  1220  func execute(gp *g) {
  1221  	_g_ := getg()
  1222  
  1223  	casgstatus(gp, _Grunnable, _Grunning)
  1224  	gp.waitsince = 0
  1225  	gp.preempt = false
  1226  	gp.stackguard0 = gp.stack.lo + _StackGuard
  1227  	_g_.m.p.schedtick++
  1228  	_g_.m.curg = gp
  1229  	gp.m = _g_.m
  1230  
  1231  	// Check whether the profiler needs to be turned on or off.
  1232  	hz := sched.profilehz
  1233  	if _g_.m.profilehz != hz {
  1234  		resetcpuprofiler(hz)
  1235  	}
  1236  
  1237  	gogo(&gp.sched)
  1238  }
  1239  
  1240  // Finds a runnable goroutine to execute.
  1241  // Tries to steal from other P's, get g from global queue, poll network.
  1242  func findrunnable() *g {
  1243  	_g_ := getg()
  1244  
  1245  top:
  1246  	if sched.gcwaiting != 0 {
  1247  		gcstopm()
  1248  		goto top
  1249  	}
  1250  	if fingwait && fingwake {
  1251  		if gp := wakefing(); gp != nil {
  1252  			ready(gp)
  1253  		}
  1254  	}
  1255  
  1256  	// local runq
  1257  	if gp := runqget(_g_.m.p); gp != nil {
  1258  		return gp
  1259  	}
  1260  
  1261  	// global runq
  1262  	if sched.runqsize != 0 {
  1263  		lock(&sched.lock)
  1264  		gp := globrunqget(_g_.m.p, 0)
  1265  		unlock(&sched.lock)
  1266  		if gp != nil {
  1267  			return gp
  1268  		}
  1269  	}
  1270  
  1271  	// poll network - returns list of goroutines
  1272  	if gp := netpoll(false); gp != nil { // non-blocking
  1273  		injectglist(gp.schedlink)
  1274  		casgstatus(gp, _Gwaiting, _Grunnable)
  1275  		return gp
  1276  	}
  1277  
  1278  	// If number of spinning M's >= number of busy P's, block.
  1279  	// This is necessary to prevent excessive CPU consumption
  1280  	// when GOMAXPROCS>>1 but the program parallelism is low.
  1281  	if !_g_.m.spinning && 2*atomicload(&sched.nmspinning) >= uint32(gomaxprocs)-atomicload(&sched.npidle) { // TODO: fast atomic
  1282  		goto stop
  1283  	}
  1284  	if !_g_.m.spinning {
  1285  		_g_.m.spinning = true
  1286  		xadd(&sched.nmspinning, 1)
  1287  	}
  1288  	// random steal from other P's
  1289  	for i := 0; i < int(2*gomaxprocs); i++ {
  1290  		if sched.gcwaiting != 0 {
  1291  			goto top
  1292  		}
  1293  		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
  1294  		var gp *g
  1295  		if _p_ == _g_.m.p {
  1296  			gp = runqget(_p_)
  1297  		} else {
  1298  			gp = runqsteal(_g_.m.p, _p_)
  1299  		}
  1300  		if gp != nil {
  1301  			return gp
  1302  		}
  1303  	}
  1304  stop:
  1305  
  1306  	// return P and block
  1307  	lock(&sched.lock)
  1308  	if sched.gcwaiting != 0 {
  1309  		unlock(&sched.lock)
  1310  		goto top
  1311  	}
  1312  	if sched.runqsize != 0 {
  1313  		gp := globrunqget(_g_.m.p, 0)
  1314  		unlock(&sched.lock)
  1315  		return gp
  1316  	}
  1317  	_p_ := releasep()
  1318  	pidleput(_p_)
  1319  	unlock(&sched.lock)
  1320  	if _g_.m.spinning {
  1321  		_g_.m.spinning = false
  1322  		xadd(&sched.nmspinning, -1)
  1323  	}
  1324  
  1325  	// check all runqueues once again
  1326  	for i := 0; i < int(gomaxprocs); i++ {
  1327  		_p_ := allp[i]
  1328  		if _p_ != nil && _p_.runqhead != _p_.runqtail {
  1329  			lock(&sched.lock)
  1330  			_p_ = pidleget()
  1331  			unlock(&sched.lock)
  1332  			if _p_ != nil {
  1333  				acquirep(_p_)
  1334  				goto top
  1335  			}
  1336  			break
  1337  		}
  1338  	}
  1339  
  1340  	// poll network
  1341  	if xchg64(&sched.lastpoll, 0) != 0 {
  1342  		if _g_.m.p != nil {
  1343  			throw("findrunnable: netpoll with p")
  1344  		}
  1345  		if _g_.m.spinning {
  1346  			throw("findrunnable: netpoll with spinning")
  1347  		}
  1348  		gp := netpoll(true) // block until new work is available
  1349  		atomicstore64(&sched.lastpoll, uint64(nanotime()))
  1350  		if gp != nil {
  1351  			lock(&sched.lock)
  1352  			_p_ = pidleget()
  1353  			unlock(&sched.lock)
  1354  			if _p_ != nil {
  1355  				acquirep(_p_)
  1356  				injectglist(gp.schedlink)
  1357  				casgstatus(gp, _Gwaiting, _Grunnable)
  1358  				return gp
  1359  			}
  1360  			injectglist(gp)
  1361  		}
  1362  	}
  1363  	stopm()
  1364  	goto top
  1365  }
  1366  
  1367  func resetspinning() {
  1368  	_g_ := getg()
  1369  
  1370  	var nmspinning uint32
  1371  	if _g_.m.spinning {
  1372  		_g_.m.spinning = false
  1373  		nmspinning = xadd(&sched.nmspinning, -1)
  1374  		if int32(nmspinning) < 0 {
  1375  			throw("findrunnable: negative nmspinning")
  1376  		}
  1377  	} else {
  1378  		nmspinning = atomicload(&sched.nmspinning)
  1379  	}
  1380  
  1381  	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
  1382  	// so see if we need to wake up another P here.
  1383  	if nmspinning == 0 && atomicload(&sched.npidle) > 0 {
  1384  		wakep()
  1385  	}
  1386  }
  1387  
  1388  // Injects the list of runnable G's into the scheduler.
  1389  // Can run concurrently with GC.
  1390  func injectglist(glist *g) {
  1391  	if glist == nil {
  1392  		return
  1393  	}
  1394  	lock(&sched.lock)
  1395  	var n int
  1396  	for n = 0; glist != nil; n++ {
  1397  		gp := glist
  1398  		glist = gp.schedlink
  1399  		casgstatus(gp, _Gwaiting, _Grunnable)
  1400  		globrunqput(gp)
  1401  	}
  1402  	unlock(&sched.lock)
  1403  	for ; n != 0 && sched.npidle != 0; n-- {
  1404  		startm(nil, false)
  1405  	}
  1406  }
  1407  
  1408  // One round of scheduler: find a runnable goroutine and execute it.
  1409  // Never returns.
  1410  func schedule() {
  1411  	_g_ := getg()
  1412  
  1413  	if _g_.m.locks != 0 {
  1414  		throw("schedule: holding locks")
  1415  	}
  1416  
  1417  	if _g_.m.lockedg != nil {
  1418  		stoplockedm()
  1419  		execute(_g_.m.lockedg) // Never returns.
  1420  	}
  1421  
  1422  top:
  1423  	if sched.gcwaiting != 0 {
  1424  		gcstopm()
  1425  		goto top
  1426  	}
  1427  
  1428  	var gp *g
  1429  	// Check the global runnable queue once in a while to ensure fairness.
  1430  	// Otherwise two goroutines can completely occupy the local runqueue
  1431  	// by constantly respawning each other.
  1432  	tick := _g_.m.p.schedtick
  1433  	// This is a fancy way to say tick%61==0,
  1434  	// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
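        	// (0x4325c53f is roughly 2^36/61, so (tick*0x4325c53f)>>36 approximates tick/61
        	// and the subtraction below leaves tick%61.)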
  1435  	if uint64(tick)-((uint64(tick)*0x4325c53f)>>36)*61 == 0 && sched.runqsize > 0 {
  1436  		lock(&sched.lock)
  1437  		gp = globrunqget(_g_.m.p, 1)
  1438  		unlock(&sched.lock)
  1439  		if gp != nil {
  1440  			resetspinning()
  1441  		}
  1442  	}
  1443  	if gp == nil {
  1444  		gp = runqget(_g_.m.p)
  1445  		if gp != nil && _g_.m.spinning {
  1446  			throw("schedule: spinning with local work")
  1447  		}
  1448  	}
  1449  	if gp == nil {
  1450  		gp = findrunnable() // blocks until work is available
  1451  		resetspinning()
  1452  	}
  1453  
  1454  	if gp.lockedm != nil {
  1455  		// Hands off own p to the locked m,
  1456  		// then blocks waiting for a new p.
  1457  		startlockedm(gp)
  1458  		goto top
  1459  	}
  1460  
  1461  	execute(gp)
  1462  }
  1463  
  1464  // dropg removes the association between m and the current goroutine m->curg (gp for short).
  1465  // Typically a caller sets gp's status away from Grunning and then
  1466  // immediately calls dropg to finish the job. The caller is also responsible
  1467  // for arranging that gp will be restarted using ready at an
  1468  // appropriate time. After calling dropg and arranging for gp to be
  1469  // readied later, the caller can do other work but eventually should
  1470  // call schedule to restart the scheduling of goroutines on this m.
  1471  func dropg() {
  1472  	_g_ := getg()
  1473  
  1474  	if _g_.m.lockedg == nil {
  1475  		_g_.m.curg.m = nil
  1476  		_g_.m.curg = nil
  1477  	}
  1478  }
  1479  
  1480  // Puts the current goroutine into a waiting state and calls unlockf.
  1481  // If unlockf returns false, the goroutine is resumed.
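        // The unlockf func value is smuggled through m.waitunlockf as an unsafe.Pointer
        // and converted back to its function type in park_m.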
  1482  func park(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
  1483  	_g_ := getg()
  1484  
  1485  	_g_.m.waitlock = lock
  1486  	_g_.m.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
  1487  	_g_.waitreason = reason
  1488  	mcall(park_m)
  1489  }
  1490  
  1491  func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  1492  	unlock((*mutex)(lock))
  1493  	return true
  1494  }
  1495  
  1496  // Puts the current goroutine into a waiting state and unlocks the lock.
  1497  // The goroutine can be made runnable again by calling ready(gp).
  1498  func parkunlock(lock *mutex, reason string) {
  1499  	park(parkunlock_c, unsafe.Pointer(lock), reason)
  1500  }
  1501  
  1502  // park continuation on g0.
  1503  func park_m(gp *g) {
  1504  	_g_ := getg()
  1505  
  1506  	casgstatus(gp, _Grunning, _Gwaiting)
  1507  	dropg()
  1508  
  1509  	if _g_.m.waitunlockf != nil {
  1510  		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
  1511  		ok := fn(gp, _g_.m.waitlock)
  1512  		_g_.m.waitunlockf = nil
  1513  		_g_.m.waitlock = nil
  1514  		if !ok {
  1515  			casgstatus(gp, _Gwaiting, _Grunnable)
  1516  			execute(gp) // Schedule it back, never returns.
  1517  		}
  1518  	}
  1519  	schedule()
  1520  }
  1521  
  1522  // Gosched continuation on g0.
  1523  func gosched_m(gp *g) {
  1524  	status := readgstatus(gp)
  1525  	if status&^_Gscan != _Grunning {
  1526  		dumpgstatus(gp)
  1527  		throw("bad g status")
  1528  	}
  1529  	casgstatus(gp, _Grunning, _Grunnable)
  1530  	dropg()
  1531  	lock(&sched.lock)
  1532  	globrunqput(gp)
  1533  	unlock(&sched.lock)
  1534  
  1535  	schedule()
  1536  }
  1537  
  1538  // Finishes execution of the current goroutine.
  1539  // Must be NOSPLIT because it is called from Go. (TODO - probably not anymore)
  1540  //go:nosplit
  1541  func goexit1() {
  1542  	if raceenabled {
  1543  		racegoend()
  1544  	}
  1545  	mcall(goexit0)
  1546  }
  1547  
  1548  // goexit continuation on g0.
  1549  func goexit0(gp *g) {
  1550  	_g_ := getg()
  1551  
  1552  	casgstatus(gp, _Grunning, _Gdead)
  1553  	gp.m = nil
  1554  	gp.lockedm = nil
  1555  	_g_.m.lockedg = nil
  1556  	gp.paniconfault = false
  1557  	gp._defer = nil // should be nil already but just in case.
  1558  	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
  1559  	gp.writebuf = nil
  1560  	gp.waitreason = ""
  1561  	gp.param = nil
  1562  
  1563  	dropg()
  1564  
  1565  	if _g_.m.locked&^_LockExternal != 0 {
  1566  		print("invalid m->locked = ", _g_.m.locked, "\n")
  1567  		throw("internal lockOSThread error")
  1568  	}
  1569  	_g_.m.locked = 0
  1570  	gfput(_g_.m.p, gp)
  1571  	schedule()
  1572  }
  1573  
  1574  //go:nosplit
  1575  //go:nowritebarrier
  1576  func save(pc, sp uintptr) {
  1577  	_g_ := getg()
  1578  
  1579  	_g_.sched.pc = pc
  1580  	_g_.sched.sp = sp
  1581  	_g_.sched.lr = 0
  1582  	_g_.sched.ret = 0
  1583  	_g_.sched.ctxt = nil
  1584  	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
  1585  }
  1586  
  1587  // The goroutine g is about to enter a system call.
  1588  // Record that it's not using the cpu anymore.
  1589  // This is called only from the go syscall library and cgocall,
  1590  // not from the low-level system calls used by the runtime.
  1591  //
  1592  // Entersyscall cannot split the stack: the gosave must
  1593  // make g->sched refer to the caller's stack segment, because
  1594  // entersyscall is going to return immediately after.
  1595  //
  1596  // Nothing entersyscall calls can split the stack either.
  1597  // We cannot safely move the stack during an active call to syscall,
  1598  // because we do not know which of the uintptr arguments are
  1599  // really pointers (back into the stack).
  1600  // In practice, this means that we make the fast path run through
  1601  // entersyscall doing no-split things, and the slow path has to use systemstack
  1602  // to run bigger things on the system stack.
  1603  //
  1604  // reentersyscall is the entry point used by cgo callbacks, where explicitly
  1605  // saved SP and PC are restored. This is needed when exitsyscall will be called
  1606  // from a function further up in the call stack than the parent, as g->syscallsp
  1607  // must always point to a valid stack frame. entersyscall below is the normal
  1608  // entry point for syscalls, which obtains the SP and PC from the caller.
  1609  //go:nosplit
  1610  func reentersyscall(pc, sp uintptr) {
  1611  	_g_ := getg()
  1612  
  1613  	// Disable preemption because during this function g is in Gsyscall status,
  1614  	// but can have inconsistent g->sched, do not let GC observe it.
  1615  	_g_.m.locks++
  1616  
  1617  	// Entersyscall must not call any function that might split/grow the stack.
  1618  	// (See details in comment above.)
  1619  	// Catch calls that might, by replacing the stack guard with something that
  1620  	// will trip any stack check and leaving a flag to tell newstack to die.
  1621  	_g_.stackguard0 = stackPreempt
  1622  	_g_.throwsplit = true
  1623  
  1624  	// Leave SP around for GC and traceback.
  1625  	save(pc, sp)
  1626  	_g_.syscallsp = sp
  1627  	_g_.syscallpc = pc
  1628  	casgstatus(_g_, _Grunning, _Gsyscall)
  1629  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  1630  		systemstack(func() {
  1631  			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  1632  			throw("entersyscall")
  1633  		})
  1634  	}
  1635  
  1636  	if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
  1637  		systemstack(entersyscall_sysmon)
  1638  		save(pc, sp)
  1639  	}
  1640  
  1641  	_g_.m.mcache = nil
  1642  	_g_.m.p.m = nil
  1643  	atomicstore(&_g_.m.p.status, _Psyscall)
  1644  	if sched.gcwaiting != 0 {
  1645  		systemstack(entersyscall_gcwait)
  1646  		save(pc, sp)
  1647  	}
  1648  
  1649  	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
  1650  	// We set stackguard0 to stackPreempt so that the first split stack check calls morestack.
  1651  	// Morestack detects this case and throws.
  1652  	_g_.stackguard0 = stackPreempt
  1653  	_g_.m.locks--
  1654  }
  1655  
  1656  // Standard syscall entry used by the go syscall library and normal cgo calls.
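        // The dummy argument is never used; its address lets getcallerpc/getcallersp
        // recover the caller's PC and SP.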
  1657  //go:nosplit
  1658  func entersyscall(dummy int32) {
  1659  	reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  1660  }
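// As a sketch of how this pair is used: a blocking wrapper in the syscall
// package brackets the raw kernel trap with entersyscall/exitsyscall so the
// scheduler can hand this M's P to other work while the goroutine blocks.
// The real wrappers are assembly; rawTrap below is a hypothetical stand-in.
//
//	func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
//		entersyscall(0)                         // P becomes _Psyscall, g becomes _Gsyscall
//		r1, r2, err = rawTrap(trap, a1, a2, a3) // hypothetical raw kernel entry
//		exitsyscall(0)                          // reacquire a P (fast path) or reschedule
//		return
//	}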
  1661  
  1662  func entersyscall_sysmon() {
  1663  	lock(&sched.lock)
  1664  	if atomicload(&sched.sysmonwait) != 0 {
  1665  		atomicstore(&sched.sysmonwait, 0)
  1666  		notewakeup(&sched.sysmonnote)
  1667  	}
  1668  	unlock(&sched.lock)
  1669  }
  1670  
  1671  func entersyscall_gcwait() {
  1672  	_g_ := getg()
  1673  
  1674  	lock(&sched.lock)
  1675  	if sched.stopwait > 0 && cas(&_g_.m.p.status, _Psyscall, _Pgcstop) {
  1676  		if sched.stopwait--; sched.stopwait == 0 {
  1677  			notewakeup(&sched.stopnote)
  1678  		}
  1679  	}
  1680  	unlock(&sched.lock)
  1681  }
  1682  
  1683  // The same as entersyscall(), but with a hint that the syscall is blocking.
  1684  //go:nosplit
  1685  func entersyscallblock(dummy int32) {
  1686  	_g_ := getg()
  1687  
  1688  	_g_.m.locks++ // see comment in entersyscall
  1689  	_g_.throwsplit = true
  1690  	_g_.stackguard0 = stackPreempt // see comment in entersyscall
  1691  
  1692  	// Leave SP around for GC and traceback.
  1693  	pc := getcallerpc(unsafe.Pointer(&dummy))
  1694  	sp := getcallersp(unsafe.Pointer(&dummy))
  1695  	save(pc, sp)
  1696  	_g_.syscallsp = _g_.sched.sp
  1697  	_g_.syscallpc = _g_.sched.pc
  1698  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  1699  		sp1 := sp
  1700  		sp2 := _g_.sched.sp
  1701  		sp3 := _g_.syscallsp
  1702  		systemstack(func() {
  1703  			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  1704  			throw("entersyscallblock")
  1705  		})
  1706  	}
  1707  	casgstatus(_g_, _Grunning, _Gsyscall)
  1708  	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
  1709  		systemstack(func() {
  1710  			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
  1711  			throw("entersyscallblock")
  1712  		})
  1713  	}
  1714  
  1715  	systemstack(entersyscallblock_handoff)
  1716  
  1717  	// Resave for traceback during blocked call.
  1718  	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
  1719  
  1720  	_g_.m.locks--
  1721  }
  1722  
  1723  func entersyscallblock_handoff() {
  1724  	handoffp(releasep())
  1725  }
  1726  
  1727  // The goroutine g exited its system call.
  1728  // Arrange for it to run on a cpu again.
  1729  // This is called only from the go syscall library, not
  1730  // from the low-level system calls used by the runtime.
  1731  //go:nosplit
  1732  func exitsyscall(dummy int32) {
  1733  	_g_ := getg()
  1734  
  1735  	_g_.m.locks++ // see comment in entersyscall
  1736  	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
  1737  		throw("exitsyscall: syscall frame is no longer valid")
  1738  	}
  1739  
  1740  	_g_.waitsince = 0
  1741  	if exitsyscallfast() {
  1742  		if _g_.m.mcache == nil {
  1743  			throw("lost mcache")
  1744  		}
  1745  		// There's a cpu for us, so we can run.
  1746  		_g_.m.p.syscalltick++
  1747  		// We need to cas the status and scan before resuming...
  1748  		casgstatus(_g_, _Gsyscall, _Grunning)
  1749  
  1750  		// Garbage collector isn't running (since we are),
  1751  		// so okay to clear syscallsp.
  1752  		_g_.syscallsp = 0
  1753  		_g_.m.locks--
  1754  		if _g_.preempt {
  1755  			// restore the preemption request in case we've cleared it in newstack
  1756  			_g_.stackguard0 = stackPreempt
  1757  		} else {
  1758  			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
  1759  			_g_.stackguard0 = _g_.stack.lo + _StackGuard
  1760  		}
  1761  		_g_.throwsplit = false
  1762  		return
  1763  	}
  1764  
  1765  	_g_.m.locks--
  1766  
  1767  	// Call the scheduler.
  1768  	mcall(exitsyscall0)
  1769  
  1770  	if _g_.m.mcache == nil {
  1771  		throw("lost mcache")
  1772  	}
  1773  
  1774  	// Scheduler returned, so we're allowed to run now.
  1775  	// Delete the syscallsp information that we left for
  1776  	// the garbage collector during the system call.
  1777  	// Must wait until now because until gosched returns
  1778  	// we don't know for sure that the garbage collector
  1779  	// is not running.
  1780  	_g_.syscallsp = 0
  1781  	_g_.m.p.syscalltick++
  1782  	_g_.throwsplit = false
  1783  }
  1784  
  1785  //go:nosplit
  1786  func exitsyscallfast() bool {
  1787  	_g_ := getg()
  1788  
  1789  	// Freezetheworld sets stopwait but does not retake P's.
  1790  	if sched.stopwait != 0 {
  1791  		_g_.m.mcache = nil
  1792  		_g_.m.p = nil
  1793  		return false
  1794  	}
  1795  
  1796  	// Try to re-acquire the last P.
  1797  	if _g_.m.p != nil && _g_.m.p.status == _Psyscall && cas(&_g_.m.p.status, _Psyscall, _Prunning) {
  1798  		// There's a cpu for us, so we can run.
  1799  		_g_.m.mcache = _g_.m.p.mcache
  1800  		_g_.m.p.m = _g_.m
  1801  		return true
  1802  	}
  1803  
  1804  	// Try to get any other idle P.
  1805  	_g_.m.mcache = nil
  1806  	_g_.m.p = nil
  1807  	if sched.pidle != nil {
  1808  		var ok bool
  1809  		systemstack(func() {
  1810  			ok = exitsyscallfast_pidle()
  1811  		})
  1812  		if ok {
  1813  			return true
  1814  		}
  1815  	}
  1816  	return false
  1817  }
  1818  
  1819  func exitsyscallfast_pidle() bool {
  1820  	lock(&sched.lock)
  1821  	_p_ := pidleget()
  1822  	if _p_ != nil && atomicload(&sched.sysmonwait) != 0 {
  1823  		atomicstore(&sched.sysmonwait, 0)
  1824  		notewakeup(&sched.sysmonnote)
  1825  	}
  1826  	unlock(&sched.lock)
  1827  	if _p_ != nil {
  1828  		acquirep(_p_)
  1829  		return true
  1830  	}
  1831  	return false
  1832  }
  1833  
  1834  // exitsyscall slow path on g0.
  1835  // Failed to acquire P, enqueue gp as runnable.
  1836  func exitsyscall0(gp *g) {
  1837  	_g_ := getg()
  1838  
  1839  	casgstatus(gp, _Gsyscall, _Grunnable)
  1840  	dropg()
  1841  	lock(&sched.lock)
  1842  	_p_ := pidleget()
  1843  	if _p_ == nil {
  1844  		globrunqput(gp)
  1845  	} else if atomicload(&sched.sysmonwait) != 0 {
  1846  		atomicstore(&sched.sysmonwait, 0)
  1847  		notewakeup(&sched.sysmonnote)
  1848  	}
  1849  	unlock(&sched.lock)
  1850  	if _p_ != nil {
  1851  		acquirep(_p_)
  1852  		execute(gp) // Never returns.
  1853  	}
  1854  	if _g_.m.lockedg != nil {
  1855  		// Wait until another thread schedules gp and so m again.
  1856  		stoplockedm()
  1857  		execute(gp) // Never returns.
  1858  	}
  1859  	stopm()
  1860  	schedule() // Never returns.
  1861  }
  1862  
  1863  func beforefork() {
  1864  	gp := getg().m.curg
  1865  
  1866  	// Fork can hang if preempted with signals frequently enough (see issue 5517).
  1867  	// Ensure that we stay on the same M where we disable profiling.
  1868  	gp.m.locks++
  1869  	if gp.m.profilehz != 0 {
  1870  		resetcpuprofiler(0)
  1871  	}
  1872  
  1873  	// This function is called before fork in syscall package.
  1874  	// Code between fork and exec must not allocate memory nor even try to grow stack.
  1875  	// Here we spoil g->stackguard0 to reliably detect any attempts to grow the stack.
  1876  	// runtime_AfterFork will undo this in parent process, but not in child.
  1877  	gp.stackguard0 = stackFork
  1878  }
  1879  
  1880  // Called from syscall package before fork.
  1881  //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
  1882  //go:nosplit
  1883  func syscall_runtime_BeforeFork() {
  1884  	systemstack(beforefork)
  1885  }
  1886  
  1887  func afterfork() {
  1888  	gp := getg().m.curg
  1889  
  1890  	// See the comment in beforefork.
  1891  	gp.stackguard0 = gp.stack.lo + _StackGuard
  1892  
  1893  	hz := sched.profilehz
  1894  	if hz != 0 {
  1895  		resetcpuprofiler(hz)
  1896  	}
  1897  	gp.m.locks--
  1898  }
  1899  
  1900  // Called from syscall package after fork in parent.
  1901  //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
  1902  //go:nosplit
  1903  func syscall_runtime_AfterFork() {
  1904  	systemstack(afterfork)
  1905  }
  1906  
  1907  // Allocate a new g, with a stack big enough for stacksize bytes.
  1908  func malg(stacksize int32) *g {
  1909  	newg := allocg()
  1910  	if stacksize >= 0 {
  1911  		stacksize = round2(_StackSystem + stacksize)
  1912  		systemstack(func() {
  1913  			newg.stack = stackalloc(uint32(stacksize))
  1914  		})
  1915  		newg.stackguard0 = newg.stack.lo + _StackGuard
  1916  		newg.stackguard1 = ^uintptr(0)
  1917  	}
  1918  	return newg
  1919  }
  1920  
  1921  // Create a new g running fn with siz bytes of arguments.
  1922  // Put it on the queue of g's waiting to run.
  1923  // The compiler turns a go statement into a call to this.
  1924  // Cannot split the stack because it assumes that the arguments
  1925  // are available sequentially after &fn; they would not be
  1926  // copied if a stack split occurred.
  1927  //go:nosplit
  1928  func newproc(siz int32, fn *funcval) {
  1929  	argp := add(unsafe.Pointer(&fn), ptrSize)
  1930  	pc := getcallerpc(unsafe.Pointer(&siz))
  1931  	systemstack(func() {
  1932  		newproc1(fn, (*uint8)(argp), siz, 0, pc)
  1933  	})
  1934  }
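// For illustration, the copying performed by newproc/newproc1 means the
// arguments of a go statement are evaluated and captured at the go statement
// itself, not when the new goroutine first runs. A small runnable example
// (outside the runtime) of that behavior:
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	func main() {
//		var wg sync.WaitGroup
//		x := 1
//		wg.Add(1)
//		go func(v int) { // v is copied onto the new g's stack now
//			defer wg.Done()
//			fmt.Println(v) // prints 1, even though x changes below
//		}(x)
//		x = 2
//		wg.Wait()
//	}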
  1935  
  1936  // Create a new g running fn with narg bytes of arguments starting
  1937  // at argp and returning nret bytes of results.  callerpc is the
  1938  // address of the go statement that created this.  The new g is put
  1939  // on the queue of g's waiting to run.
  1940  func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
  1941  	_g_ := getg()
  1942  
  1943  	if fn == nil {
  1944  		_g_.m.throwing = -1 // do not dump full stacks
  1945  		throw("go of nil func value")
  1946  	}
  1947  	_g_.m.locks++ // disable preemption because it can be holding p in a local var
  1948  	siz := narg + nret
  1949  	siz = (siz + 7) &^ 7
  1950  
  1951  	// We could allocate a larger initial stack if necessary.
  1952  	// Not worth it: this is almost always an error.
  1953  	// 4*sizeof(uintreg): extra space added below
  1954  	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
  1955  	if siz >= _StackMin-4*regSize-regSize {
  1956  		throw("newproc: function arguments too large for new goroutine")
  1957  	}
  1958  
  1959  	_p_ := _g_.m.p
  1960  	newg := gfget(_p_)
  1961  	if newg == nil {
  1962  		newg = malg(_StackMin)
  1963  		casgstatus(newg, _Gidle, _Gdead)
  1964  		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  1965  	}
  1966  	if newg.stack.hi == 0 {
  1967  		throw("newproc1: newg missing stack")
  1968  	}
  1969  
  1970  	if readgstatus(newg) != _Gdead {
  1971  		throw("newproc1: new g is not Gdead")
  1972  	}
  1973  
  1974  	sp := newg.stack.hi
  1975  	sp -= 4 * regSize // extra space in case of reads slightly beyond frame
  1976  	sp -= uintptr(siz)
  1977  	memmove(unsafe.Pointer(sp), unsafe.Pointer(argp), uintptr(narg))
  1978  	if hasLinkRegister {
  1979  		// caller's LR
  1980  		sp -= ptrSize
  1981  		*(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
  1982  	}
  1983  
  1984  	memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
  1985  	newg.sched.sp = sp
  1986  	newg.sched.pc = funcPC(goexit) + _PCQuantum // +PCQuantum so that previous instruction is in same function
  1987  	newg.sched.g = guintptr(unsafe.Pointer(newg))
  1988  	gostartcallfn(&newg.sched, fn)
  1989  	newg.gopc = callerpc
  1990  	casgstatus(newg, _Gdead, _Grunnable)
  1991  
  1992  	if _p_.goidcache == _p_.goidcacheend {
  1993  		// Sched.goidgen is the last allocated id,
  1994  		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  1995  		// At startup sched.goidgen=0, so main goroutine receives goid=1.
  1996  		_p_.goidcache = xadd64(&sched.goidgen, _GoidCacheBatch)
  1997  		_p_.goidcache -= _GoidCacheBatch - 1
  1998  		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
  1999  	}
  2000  	newg.goid = int64(_p_.goidcache)
  2001  	_p_.goidcache++
  2002  	if raceenabled {
  2003  		newg.racectx = racegostart(callerpc)
  2004  	}
  2005  	runqput(_p_, newg)
  2006  
  2007  	if atomicload(&sched.npidle) != 0 && atomicload(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
  2008  		wakep()
  2009  	}
  2010  	_g_.m.locks--
  2011  	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
  2012  		_g_.stackguard0 = stackPreempt
  2013  	}
  2014  	return newg
  2015  }
  2016  
  2017  // Put on gfree list.
  2018  // If local list is too long, transfer a batch to the global list.
  2019  func gfput(_p_ *p, gp *g) {
  2020  	if readgstatus(gp) != _Gdead {
  2021  		throw("gfput: bad status (not Gdead)")
  2022  	}
  2023  
  2024  	stksize := gp.stack.hi - gp.stack.lo
  2025  
  2026  	if stksize != _FixedStack {
  2027  		// non-standard stack size - free it.
  2028  		stackfree(gp.stack)
  2029  		gp.stack.lo = 0
  2030  		gp.stack.hi = 0
  2031  		gp.stackguard0 = 0
  2032  	}
  2033  
  2034  	gp.schedlink = _p_.gfree
  2035  	_p_.gfree = gp
  2036  	_p_.gfreecnt++
  2037  	if _p_.gfreecnt >= 64 {
  2038  		lock(&sched.gflock)
  2039  		for _p_.gfreecnt >= 32 {
  2040  			_p_.gfreecnt--
  2041  			gp = _p_.gfree
  2042  			_p_.gfree = gp.schedlink
  2043  			gp.schedlink = sched.gfree
  2044  			sched.gfree = gp
  2045  			sched.ngfree++
  2046  		}
  2047  		unlock(&sched.gflock)
  2048  	}
  2049  }
  2050  
  2051  // Get from gfree list.
  2052  // If local list is empty, grab a batch from global list.
  2053  func gfget(_p_ *p) *g {
  2054  retry:
  2055  	gp := _p_.gfree
  2056  	if gp == nil && sched.gfree != nil {
  2057  		lock(&sched.gflock)
  2058  		for _p_.gfreecnt < 32 && sched.gfree != nil {
  2059  			_p_.gfreecnt++
  2060  			gp = sched.gfree
  2061  			sched.gfree = gp.schedlink
  2062  			sched.ngfree--
  2063  			gp.schedlink = _p_.gfree
  2064  			_p_.gfree = gp
  2065  		}
  2066  		unlock(&sched.gflock)
  2067  		goto retry
  2068  	}
  2069  	if gp != nil {
  2070  		_p_.gfree = gp.schedlink
  2071  		_p_.gfreecnt--
  2072  		if gp.stack.lo == 0 {
  2073  			// Stack was deallocated in gfput.  Allocate a new one.
  2074  			systemstack(func() {
  2075  				gp.stack = stackalloc(_FixedStack)
  2076  			})
  2077  			gp.stackguard0 = gp.stack.lo + _StackGuard
  2078  		} else {
  2079  			if raceenabled {
  2080  				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  2081  			}
  2082  		}
  2083  	}
  2084  	return gp
  2085  }
  2086  
  2087  // Purge all cached G's from gfree list to the global list.
  2088  func gfpurge(_p_ *p) {
  2089  	lock(&sched.gflock)
  2090  	for _p_.gfreecnt != 0 {
  2091  		_p_.gfreecnt--
  2092  		gp := _p_.gfree
  2093  		_p_.gfree = gp.schedlink
  2094  		gp.schedlink = sched.gfree
  2095  		sched.gfree = gp
  2096  		sched.ngfree++
  2097  	}
  2098  	unlock(&sched.gflock)
  2099  }
  2100  
  2101  // Breakpoint executes a breakpoint trap.
  2102  func Breakpoint() {
  2103  	breakpoint()
  2104  }
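// For example (illustrative; the tooWeird condition is hypothetical), a
// program can stop under an attached debugger at a chosen point:
//
//	if tooWeird(state) {
//		runtime.Breakpoint() // raises a breakpoint trap that gdb/delve can catch
//	}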
  2105  
  2106  // dolockOSThread is called by LockOSThread and lockOSThread below
  2107  // after they modify m.locked. Do not allow preemption during this call,
  2108  // or else the m might be different in this function than in the caller.
  2109  //go:nosplit
  2110  func dolockOSThread() {
  2111  	_g_ := getg()
  2112  	_g_.m.lockedg = _g_
  2113  	_g_.lockedm = _g_.m
  2114  }
  2115  
  2116  //go:nosplit
  2117  
  2118  // LockOSThread wires the calling goroutine to its current operating system thread.
  2119  // Until the calling goroutine exits or calls UnlockOSThread, it will always
  2120  // execute in that thread, and no other goroutine can.
  2121  func LockOSThread() {
  2122  	getg().m.locked |= _LockExternal
  2123  	dolockOSThread()
  2124  }
  2125  
  2126  //go:nosplit
  2127  func lockOSThread() {
  2128  	getg().m.locked += _LockInternal
  2129  	dolockOSThread()
  2130  }
  2131  
  2132  // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
  2133  // after they update m->locked. Do not allow preemption during this call,
  2134  // or else the m might be different in this function than in the caller.
  2135  //go:nosplit
  2136  func dounlockOSThread() {
  2137  	_g_ := getg()
  2138  	if _g_.m.locked != 0 {
  2139  		return
  2140  	}
  2141  	_g_.m.lockedg = nil
  2142  	_g_.lockedm = nil
  2143  }
  2144  
  2145  //go:nosplit
  2146  
  2147  // UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
  2148  // If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
  2149  func UnlockOSThread() {
  2150  	getg().m.locked &^= _LockExternal
  2151  	dounlockOSThread()
  2152  }
  2153  
  2154  //go:nosplit
  2155  func unlockOSThread() {
  2156  	_g_ := getg()
  2157  	if _g_.m.locked < _LockInternal {
  2158  		systemstack(badunlockosthread)
  2159  	}
  2160  	_g_.m.locked -= _LockInternal
  2161  	dounlockOSThread()
  2162  }
  2163  
  2164  func badunlockosthread() {
  2165  	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
  2166  }
  2167  
  2168  func gcount() int32 {
  2169  	n := int32(allglen) - sched.ngfree
  2170  	for i := 0; ; i++ {
  2171  		_p_ := allp[i]
  2172  		if _p_ == nil {
  2173  			break
  2174  		}
  2175  		n -= _p_.gfreecnt
  2176  	}
  2177  
  2178  	// All these variables can be changed concurrently, so the result can be inconsistent.
  2179  	// But at least the current goroutine is running.
  2180  	if n < 1 {
  2181  		n = 1
  2182  	}
  2183  	return n
  2184  }
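// From user code the same count is exposed as runtime.NumGoroutine; for example:
//
//	fmt.Println("goroutines:", runtime.NumGoroutine()) // approximate, inherently racy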
  2185  
  2186  func mcount() int32 {
  2187  	return sched.mcount
  2188  }
  2189  
  2190  var prof struct {
  2191  	lock uint32
  2192  	hz   int32
  2193  }
  2194  
  2195  func _System()       { _System() }
  2196  func _ExternalCode() { _ExternalCode() }
  2197  func _GC()           { _GC() }
  2198  
  2199  var etext struct{}
  2200  
  2201  // Called if we receive a SIGPROF signal.
  2202  func sigprof(pc *uint8, sp *uint8, lr *uint8, gp *g, mp *m) {
  2203  	var n int32
  2204  	var traceback bool
  2205  	var stk [100]uintptr
  2206  
  2207  	if prof.hz == 0 {
  2208  		return
  2209  	}
  2210  
  2211  	// Profiling runs concurrently with GC, so it must not allocate.
  2212  	mp.mallocing++
  2213  
  2214  	// Define that a "user g" is a user-created goroutine, and a "system g"
  2215  	// is one that is m->g0 or m->gsignal. We've only made sure that we
  2216  	// can unwind user g's, so exclude the system g's.
  2217  	//
  2218  	// It is not quite as easy as testing gp == m->curg (the current user g)
  2219  	// because we might be interrupted for profiling halfway through a
  2220  	// goroutine switch. The switch involves updating three (or four) values:
  2221  	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
  2222  	// because once it gets updated the new g is running.
  2223  	//
  2224  	// When switching from a user g to a system g, LR is not considered live,
  2225  // so the update only affects g, SP, and PC. Since PC must be last,
  2226  	// the possible partial transitions in ordinary execution are (1) g alone is updated,
  2227  	// (2) both g and SP are updated, and (3) SP alone is updated.
  2228  	// If g is updated, we'll see a system g and not look closer.
  2229  	// If SP alone is updated, we can detect the partial transition by checking
  2230  	// whether the SP is within g's stack bounds. (We could also require that SP
  2231  	// be changed only after g, but the stack bounds check is needed by other
  2232  	// cases, so there is no need to impose an additional requirement.)
  2233  	//
  2234  	// There is one exceptional transition to a system g, not in ordinary execution.
  2235  	// When a signal arrives, the operating system starts the signal handler running
  2236  	// with an updated PC and SP. The g is updated last, at the beginning of the
  2237  	// handler. There are two reasons this is okay. First, until g is updated the
  2238  	// g and SP do not match, so the stack bounds check detects the partial transition.
  2239  	// Second, signal handlers currently run with signals disabled, so a profiling
  2240  	// signal cannot arrive during the handler.
  2241  	//
  2242  	// When switching from a system g to a user g, there are three possibilities.
  2243  	//
  2244  	// First, it may be that the g switch has no PC update, because the SP
  2245  	// either corresponds to a user g throughout (as in asmcgocall)
  2246  	// or because it has been arranged to look like a user g frame
  2247  	// (as in cgocallback_gofunc). In this case, since the entire
  2248  	// transition is a g+SP update, a partial transition updating just one of
  2249  	// those will be detected by the stack bounds check.
  2250  	//
  2251  	// Second, when returning from a signal handler, the PC and SP updates
  2252  	// are performed by the operating system in an atomic update, so the g
  2253  	// update must be done before them. The stack bounds check detects
  2254  	// the partial transition here, and (again) signal handlers run with signals
  2255  	// disabled, so a profiling signal cannot arrive then anyway.
  2256  	//
  2257  	// Third, the common case: it may be that the switch updates g, SP, and PC
  2258  	// separately, as in gogo.
  2259  	//
  2260  	// Because gogo is the only instance, we check whether the PC lies
  2261  // within that function, and if so, do not ask for a traceback. This approach
  2262  	// requires knowing the size of the gogo function, which we
  2263  	// record in arch_*.h and check in runtime_test.go.
  2264  	//
  2265  	// There is another apparently viable approach, recorded here in case
  2266  	// the "PC within gogo" check turns out not to be usable.
  2267  	// It would be possible to delay the update of either g or SP until immediately
  2268  	// before the PC update instruction. Then, because of the stack bounds check,
  2269  	// the only problematic interrupt point is just before that PC update instruction,
  2270  	// and the sigprof handler can detect that instruction and simulate stepping past
  2271  	// it in order to reach a consistent state. On ARM, the update of g must be made
  2272  	// in two places (in R10 and also in a TLS slot), so the delayed update would
  2273  	// need to be the SP update. The sigprof handler must read the instruction at
  2274  	// the current PC and if it was the known instruction (for example, JMP BX or
  2275  	// MOV R2, PC), use that other register in place of the PC value.
  2276  	// The biggest drawback to this solution is that it requires that we can tell
  2277  	// whether it's safe to read from the memory pointed at by PC.
  2278  	// In a correct program, we can test PC == nil and otherwise read,
  2279  	// but if a profiling signal happens at the instant that a program executes
  2280  	// a bad jump (before the program manages to handle the resulting fault)
  2281  	// the profiling handler could fault trying to read nonexistent memory.
  2282  	//
  2283  	// To recap, there are no constraints on the assembly being used for the
  2284  	// transition. We simply require that g and SP match and that the PC is not
  2285  	// in gogo.
  2286  	traceback = true
  2287  	usp := uintptr(unsafe.Pointer(sp))
  2288  	gogo := funcPC(gogo)
  2289  	if gp == nil || gp != mp.curg ||
  2290  		usp < gp.stack.lo || gp.stack.hi < usp ||
  2291  		(gogo <= uintptr(unsafe.Pointer(pc)) && uintptr(unsafe.Pointer(pc)) < gogo+_RuntimeGogoBytes) {
  2292  		traceback = false
  2293  	}
  2294  
  2295  	n = 0
  2296  	if traceback {
  2297  		n = int32(gentraceback(uintptr(unsafe.Pointer(pc)), uintptr(unsafe.Pointer(sp)), uintptr(unsafe.Pointer(lr)), gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap))
  2298  	}
  2299  	if !traceback || n <= 0 {
  2300  		// Normal traceback is impossible or has failed.
  2301  		// See if it falls into several common cases.
  2302  		n = 0
  2303  		if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
  2304  			// Cgo, we can't unwind and symbolize arbitrary C code,
  2305  			// so instead collect Go stack that leads to the cgo call.
  2306  			// This is especially important on windows, since all syscalls are cgo calls.
  2307  			n = int32(gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0))
  2308  		}
  2309  		if GOOS == "windows" && n == 0 && mp.libcallg != nil && mp.libcallpc != 0 && mp.libcallsp != 0 {
  2310  			// Libcall, i.e. runtime syscall on windows.
  2311  			// Collect Go stack that leads to the call.
  2312  			n = int32(gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg, 0, &stk[0], len(stk), nil, nil, 0))
  2313  		}
  2314  		if n == 0 {
  2315  			// If all of the above has failed, account it against abstract "System" or "GC".
  2316  			n = 2
  2317  			// "ExternalCode" is better than "etext".
  2318  			if uintptr(unsafe.Pointer(pc)) > uintptr(unsafe.Pointer(&etext)) {
  2319  				pc = (*uint8)(unsafe.Pointer(uintptr(funcPC(_ExternalCode) + _PCQuantum)))
  2320  			}
  2321  			stk[0] = uintptr(unsafe.Pointer(pc))
  2322  			if mp.gcing != 0 || mp.helpgc != 0 {
  2323  				stk[1] = funcPC(_GC) + _PCQuantum
  2324  			} else {
  2325  				stk[1] = funcPC(_System) + _PCQuantum
  2326  			}
  2327  		}
  2328  	}
  2329  
  2330  	if prof.hz != 0 {
  2331  		// Simple cas-lock to coordinate with setcpuprofilerate.
  2332  		for !cas(&prof.lock, 0, 1) {
  2333  			osyield()
  2334  		}
  2335  		if prof.hz != 0 {
  2336  			cpuproftick(&stk[0], n)
  2337  		}
  2338  		atomicstore(&prof.lock, 0)
  2339  	}
  2340  	mp.mallocing--
  2341  }
  2342  
  2343  // Arrange for sigprof to be called with a traceback hz times a second.
  2344  func setcpuprofilerate_m(hz int32) {
  2345  	// Force sane arguments.
  2346  	if hz < 0 {
  2347  		hz = 0
  2348  	}
  2349  
  2350  	// Disable preemption, otherwise we can be rescheduled to another thread
  2351  	// that has profiling enabled.
  2352  	_g_ := getg()
  2353  	_g_.m.locks++
  2354  
  2355  	// Stop profiler on this thread so that it is safe to lock prof.
  2356  	// If a profiling signal came in while we had prof locked,
  2357  	// it would deadlock.
  2358  	resetcpuprofiler(0)
  2359  
  2360  	for !cas(&prof.lock, 0, 1) {
  2361  		osyield()
  2362  	}
  2363  	prof.hz = hz
  2364  	atomicstore(&prof.lock, 0)
  2365  
  2366  	lock(&sched.lock)
  2367  	sched.profilehz = hz
  2368  	unlock(&sched.lock)
  2369  
  2370  	if hz != 0 {
  2371  		resetcpuprofiler(hz)
  2372  	}
  2373  
  2374  	_g_.m.locks--
  2375  }
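// In practice this rate is set from user code via runtime.SetCPUProfileRate or,
// more commonly, runtime/pprof, which uses a 100 Hz default. A minimal sketch
// (imports "os", "log", "runtime/pprof"):
//
//	f, err := os.Create("cpu.prof")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	if err := pprof.StartCPUProfile(f); err != nil { // ends up setting prof.hz
//		log.Fatal(err)
//	}
//	defer pprof.StopCPUProfile()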
  2376  
  2377  // Change number of processors.  The world is stopped, sched is locked.
  2378  // gcworkbufs are not being modified by either the GC or
  2379  // the write barrier code.
  2380  // Returns list of Ps with local work, they need to be scheduled by the caller.
  2381  func procresize(new int32) *p {
  2382  	old := gomaxprocs
  2383  	if old < 0 || old > _MaxGomaxprocs || new <= 0 || new > _MaxGomaxprocs {
  2384  		throw("procresize: invalid arg")
  2385  	}
  2386  
  2387  	// initialize new P's
  2388  	for i := int32(0); i < new; i++ {
  2389  		p := allp[i]
  2390  		if p == nil {
  2391  			p = newP()
  2392  			p.id = i
  2393  			p.status = _Pgcstop
  2394  			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(p))
  2395  		}
  2396  		if p.mcache == nil {
  2397  			if old == 0 && i == 0 {
  2398  				if getg().m.mcache == nil {
  2399  					throw("missing mcache?")
  2400  				}
  2401  				p.mcache = getg().m.mcache // bootstrap
  2402  			} else {
  2403  				p.mcache = allocmcache()
  2404  			}
  2405  		}
  2406  	}
  2407  
  2408  	// free unused P's
  2409  	for i := new; i < old; i++ {
  2410  		p := allp[i]
  2411  		// move all runnable goroutines to the global queue
  2412  		for p.runqhead != p.runqtail {
  2413  			// pop from tail of local queue
  2414  			p.runqtail--
  2415  			gp := p.runq[p.runqtail%uint32(len(p.runq))]
  2416  			// push onto head of global queue
  2417  			gp.schedlink = sched.runqhead
  2418  			sched.runqhead = gp
  2419  			if sched.runqtail == nil {
  2420  				sched.runqtail = gp
  2421  			}
  2422  			sched.runqsize++
  2423  		}
  2424  		freemcache(p.mcache)
  2425  		p.mcache = nil
  2426  		gfpurge(p)
  2427  		p.status = _Pdead
  2428  		// can't free P itself because it can be referenced by an M in syscall
  2429  	}
  2430  
  2431  	_g_ := getg()
  2432  	if _g_.m.p != nil && _g_.m.p.id < new {
  2433  		// continue to use the current P
  2434  		_g_.m.p.status = _Prunning
  2435  	} else {
  2436  		// release the current P and acquire allp[0]
  2437  		if _g_.m.p != nil {
  2438  			_g_.m.p.m = nil
  2439  		}
  2440  		_g_.m.p = nil
  2441  		_g_.m.mcache = nil
  2442  		p := allp[0]
  2443  		p.m = nil
  2444  		p.status = _Pidle
  2445  		acquirep(p)
  2446  	}
  2447  	var runnablePs *p
  2448  	for i := new - 1; i >= 0; i-- {
  2449  		p := allp[i]
  2450  		if _g_.m.p == p {
  2451  			continue
  2452  		}
  2453  		p.status = _Pidle
  2454  		if p.runqhead == p.runqtail {
  2455  			pidleput(p)
  2456  		} else {
  2457  			p.m = mget()
  2458  			p.link = runnablePs
  2459  			runnablePs = p
  2460  		}
  2461  	}
  2462  	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
  2463  	atomicstore((*uint32)(unsafe.Pointer(int32p)), uint32(new))
  2464  	return runnablePs
  2465  }
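// procresize is what runs under the hood of runtime.GOMAXPROCS (and at
// startup), with the world stopped. For example, from user code:
//
//	prev := runtime.GOMAXPROCS(4) // ask for 4 Ps; returns the old setting
//	cur := runtime.GOMAXPROCS(0)  // 0 only queries, does not change it
//	fmt.Println("was", prev, "now", cur)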
  2466  
  2467  // Associate p and the current m.
  2468  func acquirep(_p_ *p) {
  2469  	_g_ := getg()
  2470  
  2471  	if _g_.m.p != nil || _g_.m.mcache != nil {
  2472  		throw("acquirep: already in go")
  2473  	}
  2474  	if _p_.m != nil || _p_.status != _Pidle {
  2475  		id := int32(0)
  2476  		if _p_.m != nil {
  2477  			id = _p_.m.id
  2478  		}
  2479  		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
  2480  		throw("acquirep: invalid p state")
  2481  	}
  2482  	_g_.m.mcache = _p_.mcache
  2483  	_g_.m.p = _p_
  2484  	_p_.m = _g_.m
  2485  	_p_.status = _Prunning
  2486  }
  2487  
  2488  // Disassociate p and the current m.
  2489  func releasep() *p {
  2490  	_g_ := getg()
  2491  
  2492  	if _g_.m.p == nil || _g_.m.mcache == nil {
  2493  		throw("releasep: invalid arg")
  2494  	}
  2495  	_p_ := _g_.m.p
  2496  	if _p_.m != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
  2497  		print("releasep: m=", _g_.m, " m->p=", _g_.m.p, " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
  2498  		throw("releasep: invalid p state")
  2499  	}
  2500  	_g_.m.p = nil
  2501  	_g_.m.mcache = nil
  2502  	_p_.m = nil
  2503  	_p_.status = _Pidle
  2504  	return _p_
  2505  }
  2506  
  2507  func incidlelocked(v int32) {
  2508  	lock(&sched.lock)
  2509  	sched.nmidlelocked += v
  2510  	if v > 0 {
  2511  		checkdead()
  2512  	}
  2513  	unlock(&sched.lock)
  2514  }
  2515  
  2516  // Check for deadlock situation.
  2517  // The check is based on the number of running M's; if it is 0, the runtime is deadlocked.
  2518  func checkdead() {
  2519  	// If we are dying because of a signal caught on an already idle thread,
  2520  	// freezetheworld will cause all running threads to block.
  2521  	// And runtime will essentially enter into deadlock state,
  2522  	// except that there is a thread that will call exit soon.
  2523  	if panicking > 0 {
  2524  		return
  2525  	}
  2526  
  2527  	// -1 for sysmon
  2528  	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
  2529  	if run > 0 {
  2530  		return
  2531  	}
  2532  	if run < 0 {
  2533  		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
  2534  		throw("checkdead: inconsistent counts")
  2535  	}
  2536  
  2537  	grunning := 0
  2538  	lock(&allglock)
  2539  	for i := 0; i < len(allgs); i++ {
  2540  		gp := allgs[i]
  2541  		if gp.issystem {
  2542  			continue
  2543  		}
  2544  		s := readgstatus(gp)
  2545  		switch s &^ _Gscan {
  2546  		case _Gwaiting:
  2547  			grunning++
  2548  		case _Grunnable,
  2549  			_Grunning,
  2550  			_Gsyscall:
  2551  			unlock(&allglock)
  2552  			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
  2553  			throw("checkdead: runnable g")
  2554  		}
  2555  	}
  2556  	unlock(&allglock)
  2557  	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
  2558  		throw("no goroutines (main called runtime.Goexit) - deadlock!")
  2559  	}
  2560  
  2561  	// Maybe jump time forward for playground.
  2562  	gp := timejump()
  2563  	if gp != nil {
  2564  		casgstatus(gp, _Gwaiting, _Grunnable)
  2565  		globrunqput(gp)
  2566  		_p_ := pidleget()
  2567  		if _p_ == nil {
  2568  			throw("checkdead: no p for timer")
  2569  		}
  2570  		mp := mget()
  2571  		if mp == nil {
  2572  			_newm(nil, _p_)
  2573  		} else {
  2574  			mp.nextp = _p_
  2575  			notewakeup(&mp.park)
  2576  		}
  2577  		return
  2578  	}
  2579  
  2580  	getg().m.throwing = -1 // do not dump full stacks
  2581  	throw("all goroutines are asleep - deadlock!")
  2582  }
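// A minimal program that trips this check: the only goroutine blocks on a
// receive that can never be satisfied, no M is running Go code, and the
// runtime throws "all goroutines are asleep - deadlock!".
//
//	package main
//
//	func main() {
//		ch := make(chan int)
//		<-ch // nothing will ever send; checkdead fires
//	}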
  2583  
  2584  func sysmon() {
  2585  	// If we go two minutes without a garbage collection, force one to run.
  2586  	forcegcperiod := int64(2 * 60 * 1e9)
  2587  
  2588  	// If a heap span goes unused for 5 minutes after a garbage collection,
  2589  	// we hand it back to the operating system.
  2590  	scavengelimit := int64(5 * 60 * 1e9)
  2591  
  2592  	if debug.scavenge > 0 {
  2593  		// Scavenge-a-lot for testing.
  2594  		forcegcperiod = 10 * 1e6
  2595  		scavengelimit = 20 * 1e6
  2596  	}
  2597  
  2598  	lastscavenge := nanotime()
  2599  	nscavenge := 0
  2600  
  2601  	// Make wake-up period small enough for the sampling to be correct.
  2602  	maxsleep := forcegcperiod / 2
  2603  	if scavengelimit < forcegcperiod {
  2604  		maxsleep = scavengelimit / 2
  2605  	}
  2606  
  2607  	lasttrace := int64(0)
  2608  	idle := 0 // how many cycles in succession we have not woken anybody up
  2609  	delay := uint32(0)
  2610  	for {
  2611  		if idle == 0 { // start with 20us sleep...
  2612  			delay = 20
  2613  		} else if idle > 50 { // start doubling the sleep after 1ms...
  2614  			delay *= 2
  2615  		}
  2616  		if delay > 10*1000 { // up to 10ms
  2617  			delay = 10 * 1000
  2618  		}
  2619  		usleep(delay)
  2620  		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
  2621  			lock(&sched.lock)
  2622  			if atomicload(&sched.gcwaiting) != 0 || atomicload(&sched.npidle) == uint32(gomaxprocs) {
  2623  				atomicstore(&sched.sysmonwait, 1)
  2624  				unlock(&sched.lock)
  2625  				notetsleep(&sched.sysmonnote, maxsleep)
  2626  				lock(&sched.lock)
  2627  				atomicstore(&sched.sysmonwait, 0)
  2628  				noteclear(&sched.sysmonnote)
  2629  				idle = 0
  2630  				delay = 20
  2631  			}
  2632  			unlock(&sched.lock)
  2633  		}
  2634  		// poll network if not polled for more than 10ms
  2635  		lastpoll := int64(atomicload64(&sched.lastpoll))
  2636  		now := nanotime()
  2637  		unixnow := unixnanotime()
  2638  		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
  2639  			cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
  2640  			gp := netpoll(false) // non-blocking - returns list of goroutines
  2641  			if gp != nil {
  2642  				// Need to decrement number of idle locked M's
  2643  				// (pretending that one more is running) before injectglist.
  2644  				// Otherwise it can lead to the following situation:
  2645  				// injectglist grabs all P's but before it starts M's to run the P's,
  2646  				// another M returns from syscall, finishes running its G,
  2647  				// observes that there is no work to do and no other running M's
  2648  				// and reports deadlock.
  2649  				incidlelocked(-1)
  2650  				injectglist(gp)
  2651  				incidlelocked(1)
  2652  			}
  2653  		}
  2654  		// retake P's blocked in syscalls
  2655  		// and preempt long running G's
  2656  		if retake(now) != 0 {
  2657  			idle = 0
  2658  		} else {
  2659  			idle++
  2660  		}
  2661  		// check if we need to force a GC
  2662  		lastgc := int64(atomicload64(&memstats.last_gc))
  2663  		if lastgc != 0 && unixnow-lastgc > forcegcperiod && atomicload(&forcegc.idle) != 0 {
  2664  			lock(&forcegc.lock)
  2665  			forcegc.idle = 0
  2666  			forcegc.g.schedlink = nil
  2667  			injectglist(forcegc.g)
  2668  			unlock(&forcegc.lock)
  2669  		}
  2670  		// scavenge heap once in a while
  2671  		if lastscavenge+scavengelimit/2 < now {
  2672  			mHeap_Scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
  2673  			lastscavenge = now
  2674  			nscavenge++
  2675  		}
  2676  		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace*1000000) <= now {
  2677  			lasttrace = now
  2678  			schedtrace(debug.scheddetail > 0)
  2679  		}
  2680  	}
  2681  }
  2682  
  2683  var pdesc [_MaxGomaxprocs]struct {
  2684  	schedtick   uint32
  2685  	schedwhen   int64
  2686  	syscalltick uint32
  2687  	syscallwhen int64
  2688  }
  2689  
  2690  func retake(now int64) uint32 {
  2691  	n := 0
  2692  	for i := int32(0); i < gomaxprocs; i++ {
  2693  		_p_ := allp[i]
  2694  		if _p_ == nil {
  2695  			continue
  2696  		}
  2697  		pd := &pdesc[i]
  2698  		s := _p_.status
  2699  		if s == _Psyscall {
  2700  			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  2701  			t := int64(_p_.syscalltick)
  2702  			if int64(pd.syscalltick) != t {
  2703  				pd.syscalltick = uint32(t)
  2704  				pd.syscallwhen = now
  2705  				continue
  2706  			}
  2707  			// On the one hand we don't want to retake Ps if there is no other work to do,
  2708  			// but on the other hand we want to retake them eventually
  2709  			// because they can prevent the sysmon thread from deep sleep.
  2710  			if _p_.runqhead == _p_.runqtail && atomicload(&sched.nmspinning)+atomicload(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
  2711  				continue
  2712  			}
  2713  			// Need to decrement number of idle locked M's
  2714  			// (pretending that one more is running) before the CAS.
  2715  			// Otherwise the M from which we retake can exit the syscall,
  2716  			// increment nmidle and report deadlock.
  2717  			incidlelocked(-1)
  2718  			if cas(&_p_.status, s, _Pidle) {
  2719  				n++
  2720  				handoffp(_p_)
  2721  			}
  2722  			incidlelocked(1)
  2723  		} else if s == _Prunning {
  2724  			// Preempt G if it's running for more than 10ms.
  2725  			t := int64(_p_.schedtick)
  2726  			if int64(pd.schedtick) != t {
  2727  				pd.schedtick = uint32(t)
  2728  				pd.schedwhen = now
  2729  				continue
  2730  			}
  2731  			if pd.schedwhen+10*1000*1000 > now {
  2732  				continue
  2733  			}
  2734  			preemptone(_p_)
  2735  		}
  2736  	}
  2737  	return uint32(n)
  2738  }
  2739  
  2740  // Tell all goroutines that they have been preempted and they should stop.
  2741  // This function is purely best-effort.  It can fail to inform a goroutine if a
  2742  // processor just started running it.
  2743  // No locks need to be held.
  2744  // Returns true if preemption request was issued to at least one goroutine.
  2745  func preemptall() bool {
  2746  	res := false
  2747  	for i := int32(0); i < gomaxprocs; i++ {
  2748  		_p_ := allp[i]
  2749  		if _p_ == nil || _p_.status != _Prunning {
  2750  			continue
  2751  		}
  2752  		if preemptone(_p_) {
  2753  			res = true
  2754  		}
  2755  	}
  2756  	return res
  2757  }
  2758  
  2759  // Tell the goroutine running on processor P to stop.
  2760  // This function is purely best-effort.  It can incorrectly fail to inform the
  2761  // goroutine.  It can inform the wrong goroutine.  Even if it informs the
  2762  // correct goroutine, that goroutine might ignore the request if it is
  2763  // simultaneously executing newstack.
  2764  // No lock needs to be held.
  2765  // Returns true if preemption request was issued.
  2766  // The actual preemption will happen at some point in the future
  2767  // and will be indicated by the gp->status no longer being
  2768  // Grunning.
  2769  func preemptone(_p_ *p) bool {
  2770  	mp := _p_.m
  2771  	if mp == nil || mp == getg().m {
  2772  		return false
  2773  	}
  2774  	gp := mp.curg
  2775  	if gp == nil || gp == mp.g0 {
  2776  		return false
  2777  	}
  2778  
  2779  	gp.preempt = true
  2780  
  2781  	// Every call in a goroutine checks for stack overflow by
  2782  	// comparing the current stack pointer to gp->stackguard0.
  2783  	// Setting gp->stackguard0 to StackPreempt folds
  2784  	// preemption into the normal stack overflow check.
  2785  	gp.stackguard0 = stackPreempt
  2786  	return true
  2787  }
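// Conceptually, the poisoned stackguard0 is observed by the stack check that
// the compiler emits in function prologues, roughly:
//
//	// if SP < g.stackguard0 { call morestack }
//
// With stackguard0 == stackPreempt the check always "fails", so the next
// function call enters newstack, which sees gp.preempt set and reschedules the
// goroutine instead of actually growing the stack. (Sketch only; the real
// prologue is assembly, and nosplit functions skip the check.)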
  2788  
  2789  var starttime int64
  2790  
  2791  func schedtrace(detailed bool) {
  2792  	now := nanotime()
  2793  	if starttime == 0 {
  2794  		starttime = now
  2795  	}
  2796  
  2797  	lock(&sched.lock)
  2798  	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
  2799  	if detailed {
  2800  		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
  2801  	}
  2802  	// We must be careful while reading data from P's, M's and G's.
  2803  	// Even if we hold schedlock, most data can be changed concurrently.
  2804  	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  2805  	for i := int32(0); i < gomaxprocs; i++ {
  2806  		_p_ := allp[i]
  2807  		if _p_ == nil {
  2808  			continue
  2809  		}
  2810  		mp := _p_.m
  2811  		h := atomicload(&_p_.runqhead)
  2812  		t := atomicload(&_p_.runqtail)
  2813  		if detailed {
  2814  			id := int32(-1)
  2815  			if mp != nil {
  2816  				id = mp.id
  2817  			}
  2818  			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
  2819  		} else {
  2820  			// In non-detailed mode format lengths of per-P run queues as:
  2821  			// [len1 len2 len3 len4]
  2822  			print(" ")
  2823  			if i == 0 {
  2824  				print("[")
  2825  			}
  2826  			print(t - h)
  2827  			if i == gomaxprocs-1 {
  2828  				print("]\n")
  2829  			}
  2830  		}
  2831  	}
  2832  
  2833  	if !detailed {
  2834  		unlock(&sched.lock)
  2835  		return
  2836  	}
  2837  
  2838  	for mp := allm; mp != nil; mp = mp.alllink {
  2839  		_p_ := mp.p
  2840  		gp := mp.curg
  2841  		lockedg := mp.lockedg
  2842  		id1 := int32(-1)
  2843  		if _p_ != nil {
  2844  			id1 = _p_.id
  2845  		}
  2846  		id2 := int64(-1)
  2847  		if gp != nil {
  2848  			id2 = gp.goid
  2849  		}
  2850  		id3 := int64(-1)
  2851  		if lockedg != nil {
  2852  			id3 = lockedg.goid
  2853  		}
  2854  		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " gcing=", mp.gcing, " locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
  2855  	}
  2856  
  2857  	lock(&allglock)
  2858  	for gi := 0; gi < len(allgs); gi++ {
  2859  		gp := allgs[gi]
  2860  		mp := gp.m
  2861  		lockedm := gp.lockedm
  2862  		id1 := int32(-1)
  2863  		if mp != nil {
  2864  			id1 = mp.id
  2865  		}
  2866  		id2 := int32(-1)
  2867  		if lockedm != nil {
  2868  			id2 = lockedm.id
  2869  		}
  2870  		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
  2871  	}
  2872  	unlock(&allglock)
  2873  	unlock(&sched.lock)
  2874  }
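// schedtrace output is driven by the GODEBUG environment variable, e.g.:
//
//	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
//	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M, per-G detail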
  2875  
  2876  // Put mp on midle list.
  2877  // Sched must be locked.
  2878  func mput(mp *m) {
  2879  	mp.schedlink = sched.midle
  2880  	sched.midle = mp
  2881  	sched.nmidle++
  2882  	checkdead()
  2883  }
  2884  
  2885  // Try to get an m from midle list.
  2886  // Sched must be locked.
  2887  func mget() *m {
  2888  	mp := sched.midle
  2889  	if mp != nil {
  2890  		sched.midle = mp.schedlink
  2891  		sched.nmidle--
  2892  	}
  2893  	return mp
  2894  }
  2895  
  2896  // Put gp on the global runnable queue.
  2897  // Sched must be locked.
  2898  func globrunqput(gp *g) {
  2899  	gp.schedlink = nil
  2900  	if sched.runqtail != nil {
  2901  		sched.runqtail.schedlink = gp
  2902  	} else {
  2903  		sched.runqhead = gp
  2904  	}
  2905  	sched.runqtail = gp
  2906  	sched.runqsize++
  2907  }
  2908  
  2909  // Put a batch of runnable goroutines on the global runnable queue.
  2910  // Sched must be locked.
  2911  func globrunqputbatch(ghead *g, gtail *g, n int32) {
  2912  	gtail.schedlink = nil
  2913  	if sched.runqtail != nil {
  2914  		sched.runqtail.schedlink = ghead
  2915  	} else {
  2916  		sched.runqhead = ghead
  2917  	}
  2918  	sched.runqtail = gtail
  2919  	sched.runqsize += n
  2920  }
  2921  
  2922  // Try to get a batch of G's from the global runnable queue.
  2923  // Sched must be locked.
  2924  func globrunqget(_p_ *p, max int32) *g {
  2925  	if sched.runqsize == 0 {
  2926  		return nil
  2927  	}
  2928  
  2929  	n := sched.runqsize/gomaxprocs + 1
  2930  	if n > sched.runqsize {
  2931  		n = sched.runqsize
  2932  	}
  2933  	if max > 0 && n > max {
  2934  		n = max
  2935  	}
  2936  	if n > int32(len(_p_.runq))/2 {
  2937  		n = int32(len(_p_.runq)) / 2
  2938  	}
  2939  
  2940  	sched.runqsize -= n
  2941  	if sched.runqsize == 0 {
  2942  		sched.runqtail = nil
  2943  	}
  2944  
  2945  	gp := sched.runqhead
  2946  	sched.runqhead = gp.schedlink
  2947  	n--
  2948  	for ; n > 0; n-- {
  2949  		gp1 := sched.runqhead
  2950  		sched.runqhead = gp1.schedlink
  2951  		runqput(_p_, gp1)
  2952  	}
  2953  	return gp
  2954  }
  2955  
  2956  // Put p on the _Pidle list.
  2957  // Sched must be locked.
  2958  func pidleput(_p_ *p) {
  2959  	_p_.link = sched.pidle
  2960  	sched.pidle = _p_
  2961  	xadd(&sched.npidle, 1) // TODO: fast atomic
  2962  }
  2963  
  2964  // Try to get a p from the _Pidle list.
  2965  // Sched must be locked.
  2966  func pidleget() *p {
  2967  	_p_ := sched.pidle
  2968  	if _p_ != nil {
  2969  		sched.pidle = _p_.link
  2970  		xadd(&sched.npidle, -1) // TODO: fast atomic
  2971  	}
  2972  	return _p_
  2973  }
  2974  
  2975  // Try to put g on local runnable queue.
  2976  // If it's full, put onto global queue.
  2977  // Executed only by the owner P.
  2978  func runqput(_p_ *p, gp *g) {
  2979  retry:
  2980  	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
  2981  	t := _p_.runqtail
  2982  	if t-h < uint32(len(_p_.runq)) {
  2983  		_p_.runq[t%uint32(len(_p_.runq))] = gp
  2984  		atomicstore(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
  2985  		return
  2986  	}
  2987  	if runqputslow(_p_, gp, h, t) {
  2988  		return
  2989  	}
  2990  	// the queue is not full, so now the put above must succeed
  2991  	goto retry
  2992  }
  2993  
  2994  // Put g and a batch of work from local runnable queue on global queue.
  2995  // Executed only by the owner P.
  2996  func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
  2997  	var batch [len(_p_.runq)/2 + 1]*g
  2998  
  2999  	// First, grab a batch from local queue.
  3000  	n := t - h
  3001  	n = n / 2
  3002  	if n != uint32(len(_p_.runq)/2) {
  3003  		throw("runqputslow: queue is not full")
  3004  	}
  3005  	for i := uint32(0); i < n; i++ {
  3006  		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
  3007  	}
  3008  	if !cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  3009  		return false
  3010  	}
  3011  	batch[n] = gp
  3012  
  3013  	// Link the goroutines.
  3014  	for i := uint32(0); i < n; i++ {
  3015  		batch[i].schedlink = batch[i+1]
  3016  	}
  3017  
  3018  	// Now put the batch on global queue.
  3019  	lock(&sched.lock)
  3020  	globrunqputbatch(batch[0], batch[n], int32(n+1))
  3021  	unlock(&sched.lock)
  3022  	return true
  3023  }
  3024  
  3025  // Get g from local runnable queue.
  3026  // Executed only by the owner P.
  3027  func runqget(_p_ *p) *g {
  3028  	for {
  3029  		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
  3030  		t := _p_.runqtail
  3031  		if t == h {
  3032  			return nil
  3033  		}
  3034  		gp := _p_.runq[h%uint32(len(_p_.runq))]
  3035  		if cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
  3036  			return gp
  3037  		}
  3038  	}
  3039  }
  3040  
  3041  // Grabs a batch of goroutines from local runnable queue.
  3042  // batch array must be of size len(p->runq)/2. Returns number of grabbed goroutines.
  3043  // Can be executed by any P.
  3044  func runqgrab(_p_ *p, batch []*g) uint32 {
  3045  	for {
  3046  		h := atomicload(&_p_.runqhead) // load-acquire, synchronize with other consumers
  3047  		t := atomicload(&_p_.runqtail) // load-acquire, synchronize with the producer
  3048  		n := t - h
  3049  		n = n - n/2
  3050  		if n == 0 {
  3051  			return 0
  3052  		}
  3053  		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
  3054  			continue
  3055  		}
  3056  		for i := uint32(0); i < n; i++ {
  3057  			batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
  3058  		}
  3059  		if cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
  3060  			return n
  3061  		}
  3062  	}
  3063  }
  3064  
  3065  // Steal half of the elements from the local runnable queue of p2
  3066  // and put onto local runnable queue of p.
  3067  // Returns one of the stolen elements (or nil if failed).
  3068  func runqsteal(_p_, p2 *p) *g {
  3069  	var batch [len(_p_.runq) / 2]*g
  3070  
  3071  	n := runqgrab(p2, batch[:])
  3072  	if n == 0 {
  3073  		return nil
  3074  	}
  3075  	n--
  3076  	gp := batch[n]
  3077  	if n == 0 {
  3078  		return gp
  3079  	}
  3080  	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
  3081  	t := _p_.runqtail
  3082  	if t-h+n >= uint32(len(_p_.runq)) {
  3083  		throw("runqsteal: runq overflow")
  3084  	}
  3085  	for i := uint32(0); i < n; i++ {
  3086  		_p_.runq[(t+i)%uint32(len(_p_.runq))] = batch[i]
  3087  	}
  3088  	atomicstore(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
  3089  	return gp
  3090  }
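// The local run queue above is a fixed-size ring with a single producer (the
// owner P) and multiple consumers (the owner plus stealing Ps). A self-contained
// sketch of the same discipline, using sync/atomic and ints instead of *g
// (illustrative only; the runtime versions also spill to the global queue):
//
//	type ring struct {
//		head uint32 // advanced by CAS from any thread
//		tail uint32 // advanced only by the owner
//		buf  [256]int
//	}
//
//	func (r *ring) put(v int) bool { // owner only
//		h := atomic.LoadUint32(&r.head) // load-acquire
//		t := r.tail
//		if t-h >= uint32(len(r.buf)) {
//			return false // full; the runtime would move half to the global queue
//		}
//		r.buf[t%uint32(len(r.buf))] = v
//		atomic.StoreUint32(&r.tail, t+1) // store-release: publish the item
//		return true
//	}
//
//	func (r *ring) get() (int, bool) { // any thread
//		for {
//			h := atomic.LoadUint32(&r.head)
//			t := atomic.LoadUint32(&r.tail)
//			if t == h {
//				return 0, false // empty
//			}
//			v := r.buf[h%uint32(len(r.buf))]
//			if atomic.CompareAndSwapUint32(&r.head, h, h+1) { // commit the consume
//				return v, true
//			}
//		}
//	}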
  3091  
  3092  func testSchedLocalQueue() {
  3093  	_p_ := new(p)
  3094  	gs := make([]g, len(_p_.runq))
  3095  	for i := 0; i < len(_p_.runq); i++ {
  3096  		if runqget(_p_) != nil {
  3097  			throw("runq is not empty initially")
  3098  		}
  3099  		for j := 0; j < i; j++ {
  3100  			runqput(_p_, &gs[i])
  3101  		}
  3102  		for j := 0; j < i; j++ {
  3103  			if runqget(_p_) != &gs[i] {
  3104  				print("bad element at iter ", i, "/", j, "\n")
  3105  				throw("bad element")
  3106  			}
  3107  		}
  3108  		if runqget(_p_) != nil {
  3109  			throw("runq is not empty afterwards")
  3110  		}
  3111  	}
  3112  }
  3113  
  3114  func testSchedLocalQueueSteal() {
  3115  	p1 := new(p)
  3116  	p2 := new(p)
  3117  	gs := make([]g, len(p1.runq))
  3118  	for i := 0; i < len(p1.runq); i++ {
  3119  		for j := 0; j < i; j++ {
  3120  			gs[j].sig = 0
  3121  			runqput(p1, &gs[j])
  3122  		}
  3123  		gp := runqsteal(p2, p1)
  3124  		s := 0
  3125  		if gp != nil {
  3126  			s++
  3127  			gp.sig++
  3128  		}
  3129  		for {
  3130  			gp = runqget(p2)
  3131  			if gp == nil {
  3132  				break
  3133  			}
  3134  			s++
  3135  			gp.sig++
  3136  		}
  3137  		for {
  3138  			gp = runqget(p1)
  3139  			if gp == nil {
  3140  				break
  3141  			}
  3142  			gp.sig++
  3143  		}
  3144  		for j := 0; j < i; j++ {
  3145  			if gs[j].sig != 1 {
  3146  				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
  3147  				throw("bad element")
  3148  			}
  3149  		}
  3150  		if s != i/2 && s != i/2+1 {
  3151  			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
  3152  			throw("bad steal")
  3153  		}
  3154  	}
  3155  }
  3156  
  3157  func setMaxThreads(in int) (out int) {
  3158  	lock(&sched.lock)
  3159  	out = int(sched.maxmcount)
  3160  	sched.maxmcount = int32(in)
  3161  	checkmcount()
  3162  	unlock(&sched.lock)
  3163  	return
  3164  }
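// setMaxThreads is exposed to user code as runtime/debug.SetMaxThreads, which
// returns the previous limit (the default is 10000 threads). For example:
//
//	prev := debug.SetMaxThreads(20000) // allow more OS threads before crashing
//	_ = prev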
  3165  
  3166  var goexperiment string = "GOEXPERIMENT" // TODO: defined in zaexperiment.h
  3167  
  3168  func haveexperiment(name string) bool {
  3169  	x := goexperiment
  3170  	for x != "" {
  3171  		xname := ""
  3172  		i := index(x, ",")
  3173  		if i < 0 {
  3174  			xname, x = x, ""
  3175  		} else {
  3176  			xname, x = x[:i], x[i+1:]
  3177  		}
  3178  		if xname == name {
  3179  			return true
  3180  		}
  3181  	}
  3182  	return false
  3183  }
  3184  
  3185  //go:nosplit
  3186  func procPin() int {
  3187  	_g_ := getg()
  3188  	mp := _g_.m
  3189  
  3190  	mp.locks++
  3191  	return int(mp.p.id)
  3192  }
  3193  
  3194  //go:nosplit
  3195  func procUnpin() {
  3196  	_g_ := getg()
  3197  	_g_.m.locks--
  3198  }
  3199  
  3200  //go:linkname sync_runtime_procPin sync.runtime_procPin
  3201  //go:nosplit
  3202  func sync_runtime_procPin() int {
  3203  	return procPin()
  3204  }
  3205  
  3206  //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
  3207  //go:nosplit
  3208  func sync_runtime_procUnpin() {
  3209  	procUnpin()
  3210  }
  3211  
  3212  //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
  3213  //go:nosplit
  3214  func sync_atomic_runtime_procPin() int {
  3215  	return procPin()
  3216  }
  3217  
  3218  //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
  3219  //go:nosplit
  3220  func sync_atomic_runtime_procUnpin() {
  3221  	procUnpin()
  3222  }
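// procPin/procUnpin are how package sync (and sync/atomic) obtain a stable
// per-P identity with preemption disabled, e.g. to index per-P shards without
// locking. A rough sketch of the pattern as it appears inside package sync
// (shards and its add method are hypothetical):
//
//	pid := runtime_procPin()       // disable preemption, get this P's id
//	shards[pid%len(shards)].add(v) // touch a per-P shard
//	runtime_procUnpin()            // re-enable preemption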