github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/proc.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/cpu"
    10  	"internal/goarch"
    11  	"runtime/internal/atomic"
    12  	"runtime/internal/sys"
    13  	"unsafe"
    14  )
    15  
    16  // set using cmd/go/internal/modload.ModInfoProg
    17  var modinfo string
    18  
    19  // Goroutine scheduler
    20  // The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    21  //
    22  // The main concepts are:
    23  // G - goroutine.
    24  // M - worker thread, or machine.
    25  // P - processor, a resource that is required to execute Go code.
     26  //     M must have an associated P to execute Go code; however, it can be
     27  //     blocked or in a syscall without an associated P.
    28  //
    29  // Design doc at https://golang.org/s/go11sched.
    30  
    31  // Worker thread parking/unparking.
    32  // We need to balance between keeping enough running worker threads to utilize
    33  // available hardware parallelism and parking excessive running worker threads
    34  // to conserve CPU resources and power. This is not simple for two reasons:
    35  // (1) scheduler state is intentionally distributed (in particular, per-P work
    36  // queues), so it is not possible to compute global predicates on fast paths;
    37  // (2) for optimal thread management we would need to know the future (don't park
    38  // a worker thread when a new goroutine will be readied in near future).
    39  //
    40  // Three rejected approaches that would work badly:
    41  // 1. Centralize all scheduler state (would inhibit scalability).
     42  // 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
     43  //    is a spare P, unpark a thread and hand it the P and the goroutine.
     44  //    This would lead to thread state thrashing, as the thread that readied the
     45  //    goroutine can be out of work the very next moment, in which case we would need to park it.
     46  //    Also, it would destroy locality of computation, since we want to keep
     47  //    dependent goroutines on the same thread; and it would introduce additional latency.
    48  // 3. Unpark an additional thread whenever we ready a goroutine and there is an
    49  //    idle P, but don't do handoff. This would lead to excessive thread parking/
    50  //    unparking as the additional threads will instantly park without discovering
    51  //    any work to do.
    52  //
    53  // The current approach:
    54  //
    55  // This approach applies to three primary sources of potential work: readying a
    56  // goroutine, new/modified-earlier timers, and idle-priority GC. See below for
    57  // additional details.
    58  //
    59  // We unpark an additional thread when we submit work if (this is wakep()):
    60  // 1. There is an idle P, and
    61  // 2. There are no "spinning" worker threads.
    62  //
    63  // A worker thread is considered spinning if it is out of local work and did
    64  // not find work in the global run queue or netpoller; the spinning state is
    65  // denoted in m.spinning and in sched.nmspinning. Threads unparked this way are
    66  // also considered spinning; we don't do goroutine handoff so such threads are
    67  // out of work initially. Spinning threads spin on looking for work in per-P
    68  // run queues and timer heaps or from the GC before parking. If a spinning
    69  // thread finds work it takes itself out of the spinning state and proceeds to
    70  // execution. If it does not find work it takes itself out of the spinning
    71  // state and then parks.
    72  //
     73  // If there is at least one spinning thread (sched.nmspinning>0), we don't
    74  // unpark new threads when submitting work. To compensate for that, if the last
    75  // spinning thread finds work and stops spinning, it must unpark a new spinning
    76  // thread. This approach smooths out unjustified spikes of thread unparking,
    77  // but at the same time guarantees eventual maximal CPU parallelism
    78  // utilization.
    79  //
    80  // The main implementation complication is that we need to be very careful
    81  // during spinning->non-spinning thread transition. This transition can race
    82  // with submission of new work, and either one part or another needs to unpark
    83  // another worker thread. If they both fail to do that, we can end up with
    84  // semi-persistent CPU underutilization.
    85  //
    86  // The general pattern for submission is:
    87  // 1. Submit work to the local run queue, timer heap, or GC state.
    88  // 2. #StoreLoad-style memory barrier.
    89  // 3. Check sched.nmspinning.
    90  //
    91  // The general pattern for spinning->non-spinning transition is:
    92  // 1. Decrement nmspinning.
    93  // 2. #StoreLoad-style memory barrier.
    94  // 3. Check all per-P work queues and GC for new work.
    95  //
     96  // Note that all this complexity does not apply to the global run queue, as we
     97  // are not sloppy about thread unparking when submitting to the global queue. Also see
    98  // comments for nmspinning manipulation.
    99  //
   100  // How these different sources of work behave varies, though it doesn't affect
   101  // the synchronization approach:
   102  // * Ready goroutine: this is an obvious source of work; the goroutine is
   103  //   immediately ready and must run on some thread eventually.
   104  // * New/modified-earlier timer: The current timer implementation (see time.go)
   105  //   uses netpoll in a thread with no work available to wait for the soonest
   106  //   timer. If there is no thread waiting, we want a new spinning thread to go
   107  //   wait.
   108  // * Idle-priority GC: The GC wakes a stopped idle thread to contribute to
   109  //   background GC work (note: currently disabled per golang.org/issue/19112).
   110  //   Also see golang.org/issue/44313, as this should be extended to all GC
   111  //   workers.
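//
// As a concrete illustration of the submission-side pattern above, readying a
// goroutine in this file follows roughly the shape sketched below (a minimal,
// simplified sketch; the real sequencing lives in ready, runqput, and wakep):
//
//	casgstatus(gp, _Gwaiting, _Grunnable) // make the work runnable
//	runqput(pp, gp, next)                 // 1. submit it to the local run queue
//	wakep()                               // 2+3. wakep performs the memory barrier
//	                                      // and the npidle/nmspinning checks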
   112  
   113  var (
   114  	m0           m
   115  	g0           g
   116  	mcache0      *mcache
   117  	raceprocctx0 uintptr
   118  	raceFiniLock mutex
   119  )
   120  
   121  // This slice records the initializing tasks that need to be
   122  // done to start up the runtime. It is built by the linker.
   123  var runtime_inittasks []*initTask
   124  
   125  // main_init_done is a signal used by cgocallbackg that initialization
   126  // has been completed. It is made before _cgo_notify_runtime_init_done,
   127  // so all cgo calls can rely on it existing. When main_init is complete,
   128  // it is closed, meaning cgocallbackg can reliably receive from it.
   129  var main_init_done chan bool
   130  
   131  //go:linkname main_main main.main
   132  func main_main()
   133  
   134  // mainStarted indicates that the main M has started.
   135  var mainStarted bool
   136  
   137  // runtimeInitTime is the nanotime() at which the runtime started.
   138  var runtimeInitTime int64
   139  
   140  // Value to use for signal mask for newly created M's.
   141  var initSigmask sigset
   142  
   143  // The main goroutine.
   144  func main() {
   145  	mp := getg().m
   146  
   147  	// Racectx of m0->g0 is used only as the parent of the main goroutine.
   148  	// It must not be used for anything else.
   149  	mp.g0.racectx = 0
   150  
   151  	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
   152  	// Using decimal instead of binary GB and MB because
   153  	// they look nicer in the stack overflow failure message.
   154  	if goarch.PtrSize == 8 {
   155  		maxstacksize = 1000000000
   156  	} else {
   157  		maxstacksize = 250000000
   158  	}
   159  
   160  	// An upper limit for max stack size. Used to avoid random crashes
   161  	// after calling SetMaxStack and trying to allocate a stack that is too big,
   162  	// since stackalloc works with 32-bit sizes.
   163  	maxstackceiling = 2 * maxstacksize
   164  
   165  	// Allow newproc to start new Ms.
   166  	mainStarted = true
   167  
   168  	if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon
   169  		systemstack(func() {
   170  			newm(sysmon, nil, -1)
   171  		})
   172  	}
   173  
   174  	// Lock the main goroutine onto this, the main OS thread,
   175  	// during initialization. Most programs won't care, but a few
   176  	// do require certain calls to be made by the main thread.
   177  	// Those can arrange for main.main to run in the main thread
   178  	// by calling runtime.LockOSThread during initialization
   179  	// to preserve the lock.
   180  	lockOSThread()
   181  
   182  	if mp != &m0 {
   183  		throw("runtime.main not on m0")
   184  	}
   185  
   186  	// Record when the world started.
   187  	// Must be before doInit for tracing init.
   188  	runtimeInitTime = nanotime()
   189  	if runtimeInitTime == 0 {
   190  		throw("nanotime returning zero")
   191  	}
   192  
   193  	if debug.inittrace != 0 {
   194  		inittrace.id = getg().goid
   195  		inittrace.active = true
   196  	}
   197  
   198  	doInit(runtime_inittasks) // Must be before defer.
   199  
   200  	// Defer unlock so that runtime.Goexit during init does the unlock too.
   201  	needUnlock := true
   202  	defer func() {
   203  		if needUnlock {
   204  			unlockOSThread()
   205  		}
   206  	}()
   207  
   208  	gcenable()
   209  
   210  	main_init_done = make(chan bool)
   211  	if iscgo {
   212  		if _cgo_thread_start == nil {
   213  			throw("_cgo_thread_start missing")
   214  		}
   215  		if GOOS != "windows" {
   216  			if _cgo_setenv == nil {
   217  				throw("_cgo_setenv missing")
   218  			}
   219  			if _cgo_unsetenv == nil {
   220  				throw("_cgo_unsetenv missing")
   221  			}
   222  		}
   223  		if _cgo_notify_runtime_init_done == nil {
   224  			throw("_cgo_notify_runtime_init_done missing")
   225  		}
   226  		// Start the template thread in case we enter Go from
   227  		// a C-created thread and need to create a new thread.
   228  		startTemplateThread()
   229  		cgocall(_cgo_notify_runtime_init_done, nil)
   230  	}
   231  
   232  	// Run the initializing tasks. Depending on build mode this
   233  	// list can arrive a few different ways, but it will always
   234  	// contain the init tasks computed by the linker for all the
   235  	// packages in the program (excluding those added at runtime
   236  	// by package plugin).
   237  	for _, m := range activeModules() {
   238  		doInit(m.inittasks)
   239  	}
   240  
   241  	// Disable init tracing after main init done to avoid overhead
   242  	// of collecting statistics in malloc and newproc
   243  	inittrace.active = false
   244  
   245  	close(main_init_done)
   246  
   247  	needUnlock = false
   248  	unlockOSThread()
   249  
   250  	if isarchive || islibrary {
   251  		// A program compiled with -buildmode=c-archive or c-shared
   252  		// has a main, but it is not executed.
   253  		return
   254  	}
   255  	fn := main_main // make an indirect call, as the linker doesn't know the address of the main package when laying down the runtime
   256  	fn()
   257  	if raceenabled {
   258  		runExitHooks(0) // run hooks now, since racefini does not return
   259  		racefini()
   260  	}
   261  
   262  	// Make racy client program work: if panicking on
   263  	// another goroutine at the same time as main returns,
   264  	// let the other goroutine finish printing the panic trace.
   265  	// Once it does, it will exit. See issues 3934 and 20018.
   266  	if runningPanicDefers.Load() != 0 {
   267  		// Running deferred functions should not take long.
   268  		for c := 0; c < 1000; c++ {
   269  			if runningPanicDefers.Load() == 0 {
   270  				break
   271  			}
   272  			Gosched()
   273  		}
   274  	}
   275  	if panicking.Load() != 0 {
   276  		gopark(nil, nil, waitReasonPanicWait, traceEvGoStop, 1)
   277  	}
   278  	runExitHooks(0)
   279  
   280  	exit(0)
   281  	for {
   282  		var x *int32
   283  		*x = 0
   284  	}
   285  }
   286  
   287  // os_beforeExit is called from os.Exit(0).
   288  //
   289  //go:linkname os_beforeExit os.runtime_beforeExit
   290  func os_beforeExit(exitCode int) {
   291  	runExitHooks(exitCode)
   292  	if exitCode == 0 && raceenabled {
   293  		racefini()
   294  	}
   295  }
   296  
   297  // start forcegc helper goroutine
   298  func init() {
   299  	go forcegchelper()
   300  }
   301  
   302  func forcegchelper() {
   303  	forcegc.g = getg()
   304  	lockInit(&forcegc.lock, lockRankForcegc)
   305  	for {
   306  		lock(&forcegc.lock)
   307  		if forcegc.idle.Load() {
   308  			throw("forcegc: phase error")
   309  		}
   310  		forcegc.idle.Store(true)
   311  		goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceEvGoBlock, 1)
   312  		// this goroutine is explicitly resumed by sysmon
   313  		if debug.gctrace > 0 {
   314  			println("GC forced")
   315  		}
   316  		// Time-triggered, fully concurrent.
   317  		gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
   318  	}
   319  }
   320  
   321  // Gosched yields the processor, allowing other goroutines to run. It does not
   322  // suspend the current goroutine, so execution resumes automatically.
   323  //
   324  //go:nosplit
   325  func Gosched() {
   326  	checkTimeouts()
   327  	mcall(gosched_m)
   328  }
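// For code outside the runtime, a typical use of Gosched is to yield inside a
// long-running, non-blocking loop so that other goroutines get a chance to run.
// An illustrative sketch (process and workItems are placeholders, not real APIs):
//
//	for i := 0; i < workItems; i++ {
//		process(i)
//		if i%1000 == 0 {
//			runtime.Gosched() // yield periodically in a tight loop
//		}
//	}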
   329  
   330  // goschedguarded yields the processor like gosched, but also checks
   331  // for forbidden states and opts out of the yield in those cases.
   332  //
   333  //go:nosplit
   334  func goschedguarded() {
   335  	mcall(goschedguarded_m)
   336  }
   337  
    338  // goschedIfBusy yields the processor like gosched, but skips the yield when
    339  // there are idle Ps and no preemption request is pending, since in that case
    340  // there is already freely available idle time and yielding gains nothing.
   341  //
   342  //go:nosplit
   343  func goschedIfBusy() {
   344  	gp := getg()
   345  	// Call gosched if gp.preempt is set; we may be in a tight loop that
   346  	// doesn't otherwise yield.
   347  	if !gp.preempt && sched.npidle.Load() > 0 {
   348  		return
   349  	}
   350  	mcall(gosched_m)
   351  }
   352  
   353  // Puts the current goroutine into a waiting state and calls unlockf on the
   354  // system stack.
   355  //
   356  // If unlockf returns false, the goroutine is resumed.
   357  //
   358  // unlockf must not access this G's stack, as it may be moved between
   359  // the call to gopark and the call to unlockf.
   360  //
   361  // Note that because unlockf is called after putting the G into a waiting
   362  // state, the G may have already been readied by the time unlockf is called
   363  // unless there is external synchronization preventing the G from being
   364  // readied. If unlockf returns false, it must guarantee that the G cannot be
   365  // externally readied.
   366  //
   367  // Reason explains why the goroutine has been parked. It is displayed in stack
   368  // traces and heap dumps. Reasons should be unique and descriptive. Do not
    369  // re-use reasons; add new ones.
   370  func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceEv byte, traceskip int) {
   371  	if reason != waitReasonSleep {
   372  		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy
   373  	}
   374  	mp := acquirem()
   375  	gp := mp.curg
   376  	status := readgstatus(gp)
   377  	if status != _Grunning && status != _Gscanrunning {
   378  		throw("gopark: bad g status")
   379  	}
   380  	mp.waitlock = lock
   381  	mp.waitunlockf = unlockf
   382  	gp.waitreason = reason
   383  	mp.waittraceev = traceEv
   384  	mp.waittraceskip = traceskip
   385  	releasem(mp)
   386  	// can't do anything that might move the G between Ms here.
   387  	mcall(park_m)
   388  }
   389  
   390  // Puts the current goroutine into a waiting state and unlocks the lock.
   391  // The goroutine can be made runnable again by calling goready(gp).
   392  func goparkunlock(lock *mutex, reason waitReason, traceEv byte, traceskip int) {
   393  	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
   394  }
   395  
   396  func goready(gp *g, traceskip int) {
   397  	systemstack(func() {
   398  		ready(gp, traceskip, true)
   399  	})
   400  }
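// Inside the runtime, gopark/goparkunlock and goready are used as a matched
// pair: one goroutine records itself on some wait structure and parks, and
// another goroutine readies it once the awaited event has occurred. A minimal
// sketch of that pattern (q and q.waiter are illustrative placeholders):
//
//	// parking side, holding q.lock
//	q.waiter = getg()
//	goparkunlock(&q.lock, waitReasonZero, traceEvGoBlock, 1)
//
//	// waking side, after publishing the awaited state
//	goready(q.waiter, 1)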
   401  
   402  //go:nosplit
   403  func acquireSudog() *sudog {
   404  	// Delicate dance: the semaphore implementation calls
   405  	// acquireSudog, acquireSudog calls new(sudog),
   406  	// new calls malloc, malloc can call the garbage collector,
   407  	// and the garbage collector calls the semaphore implementation
   408  	// in stopTheWorld.
   409  	// Break the cycle by doing acquirem/releasem around new(sudog).
   410  	// The acquirem/releasem increments m.locks during new(sudog),
   411  	// which keeps the garbage collector from being invoked.
   412  	mp := acquirem()
   413  	pp := mp.p.ptr()
   414  	if len(pp.sudogcache) == 0 {
   415  		lock(&sched.sudoglock)
   416  		// First, try to grab a batch from central cache.
   417  		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
   418  			s := sched.sudogcache
   419  			sched.sudogcache = s.next
   420  			s.next = nil
   421  			pp.sudogcache = append(pp.sudogcache, s)
   422  		}
   423  		unlock(&sched.sudoglock)
   424  		// If the central cache is empty, allocate a new one.
   425  		if len(pp.sudogcache) == 0 {
   426  			pp.sudogcache = append(pp.sudogcache, new(sudog))
   427  		}
   428  	}
   429  	n := len(pp.sudogcache)
   430  	s := pp.sudogcache[n-1]
   431  	pp.sudogcache[n-1] = nil
   432  	pp.sudogcache = pp.sudogcache[:n-1]
   433  	if s.elem != nil {
   434  		throw("acquireSudog: found s.elem != nil in cache")
   435  	}
   436  	releasem(mp)
   437  	return s
   438  }
   439  
   440  //go:nosplit
   441  func releaseSudog(s *sudog) {
   442  	if s.elem != nil {
   443  		throw("runtime: sudog with non-nil elem")
   444  	}
   445  	if s.isSelect {
   446  		throw("runtime: sudog with non-false isSelect")
   447  	}
   448  	if s.next != nil {
   449  		throw("runtime: sudog with non-nil next")
   450  	}
   451  	if s.prev != nil {
   452  		throw("runtime: sudog with non-nil prev")
   453  	}
   454  	if s.waitlink != nil {
   455  		throw("runtime: sudog with non-nil waitlink")
   456  	}
   457  	if s.c != nil {
   458  		throw("runtime: sudog with non-nil c")
   459  	}
   460  	gp := getg()
   461  	if gp.param != nil {
   462  		throw("runtime: releaseSudog with non-nil gp.param")
   463  	}
   464  	mp := acquirem() // avoid rescheduling to another P
   465  	pp := mp.p.ptr()
   466  	if len(pp.sudogcache) == cap(pp.sudogcache) {
   467  		// Transfer half of local cache to the central cache.
   468  		var first, last *sudog
   469  		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
   470  			n := len(pp.sudogcache)
   471  			p := pp.sudogcache[n-1]
   472  			pp.sudogcache[n-1] = nil
   473  			pp.sudogcache = pp.sudogcache[:n-1]
   474  			if first == nil {
   475  				first = p
   476  			} else {
   477  				last.next = p
   478  			}
   479  			last = p
   480  		}
   481  		lock(&sched.sudoglock)
   482  		last.next = sched.sudogcache
   483  		sched.sudogcache = first
   484  		unlock(&sched.sudoglock)
   485  	}
   486  	pp.sudogcache = append(pp.sudogcache, s)
   487  	releasem(mp)
   488  }
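// acquireSudog and releaseSudog always bracket a single blocking operation;
// a sudog never outlives the wait it was acquired for. A rough sketch of how
// the channel and semaphore code uses the pair (details elided):
//
//	s := acquireSudog()
//	s.g = getg()
//	// ... enqueue s on the wait list and park ...
//	// ... once woken, dequeue s and clear the fields checked above ...
//	releaseSudog(s)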
   489  
   490  // called from assembly.
   491  func badmcall(fn func(*g)) {
   492  	throw("runtime: mcall called on m->g0 stack")
   493  }
   494  
   495  func badmcall2(fn func(*g)) {
   496  	throw("runtime: mcall function returned")
   497  }
   498  
   499  func badreflectcall() {
   500  	panic(plainError("arg size to reflect.call more than 1GB"))
   501  }
   502  
   503  //go:nosplit
   504  //go:nowritebarrierrec
   505  func badmorestackg0() {
   506  	writeErrStr("fatal: morestack on g0\n")
   507  }
   508  
   509  //go:nosplit
   510  //go:nowritebarrierrec
   511  func badmorestackgsignal() {
   512  	writeErrStr("fatal: morestack on gsignal\n")
   513  }
   514  
   515  //go:nosplit
   516  func badctxt() {
   517  	throw("ctxt != 0")
   518  }
   519  
   520  func lockedOSThread() bool {
   521  	gp := getg()
   522  	return gp.lockedm != 0 && gp.m.lockedg != 0
   523  }
   524  
   525  var (
   526  	// allgs contains all Gs ever created (including dead Gs), and thus
   527  	// never shrinks.
   528  	//
   529  	// Access via the slice is protected by allglock or stop-the-world.
   530  	// Readers that cannot take the lock may (carefully!) use the atomic
   531  	// variables below.
   532  	allglock mutex
   533  	allgs    []*g
   534  
   535  	// allglen and allgptr are atomic variables that contain len(allgs) and
   536  	// &allgs[0] respectively. Proper ordering depends on totally-ordered
   537  	// loads and stores. Writes are protected by allglock.
   538  	//
    539  	// allgptr is updated before allglen. Readers should read allglen
    540  	// before allgptr to ensure that allglen never exceeds the length of the
    541  	// array that allgptr points to. New Gs appended during the race can be
    542  	// missed. For a consistent view of all Gs, allglock must be held.
   543  	//
   544  	// allgptr copies should always be stored as a concrete type or
   545  	// unsafe.Pointer, not uintptr, to ensure that GC can still reach it
   546  	// even if it points to a stale array.
   547  	allglen uintptr
   548  	allgptr **g
   549  )
   550  
   551  func allgadd(gp *g) {
   552  	if readgstatus(gp) == _Gidle {
   553  		throw("allgadd: bad status Gidle")
   554  	}
   555  
   556  	lock(&allglock)
   557  	allgs = append(allgs, gp)
   558  	if &allgs[0] != allgptr {
   559  		atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
   560  	}
   561  	atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
   562  	unlock(&allglock)
   563  }
   564  
   565  // allGsSnapshot returns a snapshot of the slice of all Gs.
   566  //
   567  // The world must be stopped or allglock must be held.
   568  func allGsSnapshot() []*g {
   569  	assertWorldStoppedOrLockHeld(&allglock)
   570  
   571  	// Because the world is stopped or allglock is held, allgadd
   572  	// cannot happen concurrently with this. allgs grows
   573  	// monotonically and existing entries never change, so we can
   574  	// simply return a copy of the slice header. For added safety,
   575  	// we trim everything past len because that can still change.
   576  	return allgs[:len(allgs):len(allgs)]
   577  }
   578  
   579  // atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
   580  func atomicAllG() (**g, uintptr) {
   581  	length := atomic.Loaduintptr(&allglen)
   582  	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
   583  	return ptr, length
   584  }
   585  
   586  // atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
   587  func atomicAllGIndex(ptr **g, i uintptr) *g {
   588  	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
   589  }
   590  
   591  // forEachG calls fn on every G from allgs.
   592  //
   593  // forEachG takes a lock to exclude concurrent addition of new Gs.
   594  func forEachG(fn func(gp *g)) {
   595  	lock(&allglock)
   596  	for _, gp := range allgs {
   597  		fn(gp)
   598  	}
   599  	unlock(&allglock)
   600  }
   601  
   602  // forEachGRace calls fn on every G from allgs.
   603  //
   604  // forEachGRace avoids locking, but does not exclude addition of new Gs during
   605  // execution, which may be missed.
   606  func forEachGRace(fn func(gp *g)) {
   607  	ptr, length := atomicAllG()
   608  	for i := uintptr(0); i < length; i++ {
   609  		gp := atomicAllGIndex(ptr, i)
   610  		fn(gp)
   611  	}
   612  	return
   613  }
   614  
   615  const (
   616  	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
    617  	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
   618  	_GoidCacheBatch = 16
   619  )
   620  
   621  // cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
   622  // value of the GODEBUG environment variable.
   623  func cpuinit(env string) {
   624  	switch GOOS {
   625  	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
   626  		cpu.DebugOptions = true
   627  	}
   628  	cpu.Initialize(env)
   629  
    630  	// These cpu feature support variables are used in code generated by the compiler
    631  	// to guard execution of instructions that cannot be assumed to always be supported.
   632  	switch GOARCH {
   633  	case "386", "amd64":
   634  		x86HasPOPCNT = cpu.X86.HasPOPCNT
   635  		x86HasSSE41 = cpu.X86.HasSSE41
   636  		x86HasFMA = cpu.X86.HasFMA
   637  
   638  	case "arm":
   639  		armHasVFPv4 = cpu.ARM.HasVFPv4
   640  
   641  	case "arm64":
   642  		arm64HasATOMICS = cpu.ARM64.HasATOMICS
   643  	}
   644  }
   645  
   646  // getGodebugEarly extracts the environment variable GODEBUG from the environment on
   647  // Unix-like operating systems and returns it. This function exists to extract GODEBUG
   648  // early before much of the runtime is initialized.
   649  func getGodebugEarly() string {
   650  	const prefix = "GODEBUG="
   651  	var env string
   652  	switch GOOS {
   653  	case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
   654  		// Similar to goenv_unix but extracts the environment value for
   655  		// GODEBUG directly.
   656  		// TODO(moehrmann): remove when general goenvs() can be called before cpuinit()
   657  		n := int32(0)
   658  		for argv_index(argv, argc+1+n) != nil {
   659  			n++
   660  		}
   661  
   662  		for i := int32(0); i < n; i++ {
   663  			p := argv_index(argv, argc+1+i)
   664  			s := unsafe.String(p, findnull(p))
   665  
   666  			if hasPrefix(s, prefix) {
   667  				env = gostring(p)[len(prefix):]
   668  				break
   669  			}
   670  		}
   671  	}
   672  	return env
   673  }
   674  
   675  // The bootstrap sequence is:
   676  //
   677  //	call osinit
   678  //	call schedinit
   679  //	make & queue new G
   680  //	call runtime·mstart
   681  //
   682  // The new G calls runtime·main.
   683  func schedinit() {
   684  	lockInit(&sched.lock, lockRankSched)
   685  	lockInit(&sched.sysmonlock, lockRankSysmon)
   686  	lockInit(&sched.deferlock, lockRankDefer)
   687  	lockInit(&sched.sudoglock, lockRankSudog)
   688  	lockInit(&deadlock, lockRankDeadlock)
   689  	lockInit(&paniclk, lockRankPanic)
   690  	lockInit(&allglock, lockRankAllg)
   691  	lockInit(&allpLock, lockRankAllp)
   692  	lockInit(&reflectOffs.lock, lockRankReflectOffs)
   693  	lockInit(&finlock, lockRankFin)
   694  	lockInit(&cpuprof.lock, lockRankCpuprof)
   695  	traceLockInit()
   696  	// Enforce that this lock is always a leaf lock.
   697  	// All of this lock's critical sections should be
   698  	// extremely short.
   699  	lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
   700  
   701  	// raceinit must be the first call to race detector.
   702  	// In particular, it must be done before mallocinit below calls racemapshadow.
   703  	gp := getg()
   704  	if raceenabled {
   705  		gp.racectx, raceprocctx0 = raceinit()
   706  	}
   707  
   708  	sched.maxmcount = 10000
   709  
   710  	// The world starts stopped.
   711  	worldStopped()
   712  
   713  	moduledataverify()
   714  	stackinit()
   715  	mallocinit()
   716  	godebug := getGodebugEarly()
   717  	initPageTrace(godebug) // must run after mallocinit but before anything allocates
   718  	cpuinit(godebug)       // must run before alginit
   719  	alginit()              // maps, hash, fastrand must not be used before this call
   720  	fastrandinit()         // must run before mcommoninit
   721  	mcommoninit(gp.m, -1)
   722  	modulesinit()   // provides activeModules
   723  	typelinksinit() // uses maps, activeModules
   724  	itabsinit()     // uses activeModules
   725  	stkobjinit()    // must run before GC starts
   726  
   727  	sigsave(&gp.m.sigmask)
   728  	initSigmask = gp.m.sigmask
   729  
   730  	goargs()
   731  	goenvs()
   732  	parsedebugvars()
   733  	gcinit()
   734  
    735  	// If disableMemoryProfiling is set, update MemProfileRate to 0 to turn off memprofile.
    736  	// Note: parsedebugvars may update MemProfileRate, but when disableMemoryProfiling is
    737  	// set to true by the linker, nothing is consuming the profile, so it is
    738  	// safe to set MemProfileRate to 0.
   739  	if disableMemoryProfiling {
   740  		MemProfileRate = 0
   741  	}
   742  
   743  	lock(&sched.lock)
   744  	sched.lastpoll.Store(nanotime())
   745  	procs := ncpu
   746  	if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
   747  		procs = n
   748  	}
   749  	if procresize(procs) != nil {
   750  		throw("unknown runnable goroutine during bootstrap")
   751  	}
   752  	unlock(&sched.lock)
   753  
   754  	// World is effectively started now, as P's can run.
   755  	worldStarted()
   756  
   757  	if buildVersion == "" {
   758  		// Condition should never trigger. This code just serves
   759  		// to ensure runtime·buildVersion is kept in the resulting binary.
   760  		buildVersion = "unknown"
   761  	}
   762  	if len(modinfo) == 1 {
   763  		// Condition should never trigger. This code just serves
   764  		// to ensure runtime·modinfo is kept in the resulting binary.
   765  		modinfo = ""
   766  	}
   767  }
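// The GOMAXPROCS handling above only establishes the initial number of Ps.
// After startup the value can be queried or changed with runtime.GOMAXPROCS,
// which stops the world and calls procresize. An illustrative sketch from
// user code:
//
//	prev := runtime.GOMAXPROCS(0) // an argument of 0 only queries the setting
//	runtime.GOMAXPROCS(prev * 2)  // changing it triggers a stop-the-world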
   768  
   769  func dumpgstatus(gp *g) {
   770  	thisg := getg()
   771  	print("runtime:   gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   772  	print("runtime: getg:  g=", thisg, ", goid=", thisg.goid, ",  g->atomicstatus=", readgstatus(thisg), "\n")
   773  }
   774  
   775  // sched.lock must be held.
   776  func checkmcount() {
   777  	assertLockHeld(&sched.lock)
   778  
   779  	// Exclude extra M's, which are used for cgocallback from threads
   780  	// created in C.
   781  	//
   782  	// The purpose of the SetMaxThreads limit is to avoid accidental fork
   783  	// bomb from something like millions of goroutines blocking on system
   784  	// calls, causing the runtime to create millions of threads. By
   785  	// definition, this isn't a problem for threads created in C, so we
   786  	// exclude them from the limit. See https://go.dev/issue/60004.
   787  	count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
   788  	if count > sched.maxmcount {
   789  		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
   790  		throw("thread exhaustion")
   791  	}
   792  }
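// The limit enforced above is the one configured via runtime/debug.SetMaxThreads
// (it defaults to 10000, set in schedinit). An illustrative sketch from user
// code that deliberately blocks many goroutines in syscalls or cgo:
//
//	prev := debug.SetMaxThreads(20000) // returns the previous limit
//	defer debug.SetMaxThreads(prev)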
   793  
   794  // mReserveID returns the next ID to use for a new m. This new m is immediately
   795  // considered 'running' by checkdead.
   796  //
   797  // sched.lock must be held.
   798  func mReserveID() int64 {
   799  	assertLockHeld(&sched.lock)
   800  
   801  	if sched.mnext+1 < sched.mnext {
   802  		throw("runtime: thread ID overflow")
   803  	}
   804  	id := sched.mnext
   805  	sched.mnext++
   806  	checkmcount()
   807  	return id
   808  }
   809  
   810  // Pre-allocated ID may be passed as 'id', or omitted by passing -1.
   811  func mcommoninit(mp *m, id int64) {
   812  	gp := getg()
   813  
    814  	// g0 stack won't make sense for user (and is not necessarily unwindable).
   815  	if gp != gp.m.g0 {
   816  		callers(1, mp.createstack[:])
   817  	}
   818  
   819  	lock(&sched.lock)
   820  
   821  	if id >= 0 {
   822  		mp.id = id
   823  	} else {
   824  		mp.id = mReserveID()
   825  	}
   826  
   827  	lo := uint32(int64Hash(uint64(mp.id), fastrandseed))
   828  	hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed))
   829  	if lo|hi == 0 {
   830  		hi = 1
   831  	}
   832  	// Same behavior as for 1.17.
   833  	// TODO: Simplify this.
   834  	if goarch.BigEndian {
   835  		mp.fastrand = uint64(lo)<<32 | uint64(hi)
   836  	} else {
   837  		mp.fastrand = uint64(hi)<<32 | uint64(lo)
   838  	}
   839  
   840  	mpreinit(mp)
   841  	if mp.gsignal != nil {
   842  		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
   843  	}
   844  
   845  	// Add to allm so garbage collector doesn't free g->m
   846  	// when it is just in a register or thread-local storage.
   847  	mp.alllink = allm
   848  
   849  	// NumCgoCall() iterates over allm w/o schedlock,
   850  	// so we need to publish it safely.
   851  	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
   852  	unlock(&sched.lock)
   853  
   854  	// Allocate memory to hold a cgo traceback if the cgo call crashes.
   855  	if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
   856  		mp.cgoCallers = new(cgoCallers)
   857  	}
   858  }
   859  
   860  func (mp *m) becomeSpinning() {
   861  	mp.spinning = true
   862  	sched.nmspinning.Add(1)
   863  	sched.needspinning.Store(0)
   864  }
   865  
   866  func (mp *m) hasCgoOnStack() bool {
   867  	return mp.ncgo > 0 || mp.isextra
   868  }
   869  
   870  var fastrandseed uintptr
   871  
   872  func fastrandinit() {
   873  	s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
   874  	getRandomData(s)
   875  }
   876  
   877  // Mark gp ready to run.
   878  func ready(gp *g, traceskip int, next bool) {
   879  	if traceEnabled() {
   880  		traceGoUnpark(gp, traceskip)
   881  	}
   882  
   883  	status := readgstatus(gp)
   884  
   885  	// Mark runnable.
   886  	mp := acquirem() // disable preemption because it can be holding p in a local var
   887  	if status&^_Gscan != _Gwaiting {
   888  		dumpgstatus(gp)
   889  		throw("bad g->status in ready")
   890  	}
   891  
   892  	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   893  	casgstatus(gp, _Gwaiting, _Grunnable)
   894  	runqput(mp.p.ptr(), gp, next)
   895  	wakep()
   896  	releasem(mp)
   897  }
   898  
   899  // freezeStopWait is a large value that freezetheworld sets
   900  // sched.stopwait to in order to request that all Gs permanently stop.
   901  const freezeStopWait = 0x7fffffff
   902  
   903  // freezing is set to non-zero if the runtime is trying to freeze the
   904  // world.
   905  var freezing atomic.Bool
   906  
   907  // Similar to stopTheWorld but best-effort and can be called several times.
    908  // There is no reverse operation; it is used during crashing.
   909  // This function must not lock any mutexes.
   910  func freezetheworld() {
   911  	freezing.Store(true)
   912  	// stopwait and preemption requests can be lost
   913  	// due to races with concurrently executing threads,
   914  	// so try several times
   915  	for i := 0; i < 5; i++ {
   916  		// this should tell the scheduler to not start any new goroutines
   917  		sched.stopwait = freezeStopWait
   918  		sched.gcwaiting.Store(true)
   919  		// this should stop running goroutines
   920  		if !preemptall() {
   921  			break // no running goroutines
   922  		}
   923  		usleep(1000)
   924  	}
   925  	// to be sure
   926  	usleep(1000)
   927  	preemptall()
   928  	usleep(1000)
   929  }
   930  
    931  // All reads and writes of g's status go through readgstatus, casgstatus,
    932  // castogscanstatus, and casfrom_Gscanstatus.
   933  //
   934  //go:nosplit
   935  func readgstatus(gp *g) uint32 {
   936  	return gp.atomicstatus.Load()
   937  }
   938  
    939  // The Gscanstatuses are acting like locks, and this releases them.
    940  // If it proves to be a performance hit we should be able to make these
    941  // simple atomic stores, but for now we are going to throw if
    942  // we see an inconsistent state.
   943  func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
   944  	success := false
   945  
   946  	// Check that transition is valid.
   947  	switch oldval {
   948  	default:
   949  		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   950  		dumpgstatus(gp)
   951  		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
   952  	case _Gscanrunnable,
   953  		_Gscanwaiting,
   954  		_Gscanrunning,
   955  		_Gscansyscall,
   956  		_Gscanpreempted:
   957  		if newval == oldval&^_Gscan {
   958  			success = gp.atomicstatus.CompareAndSwap(oldval, newval)
   959  		}
   960  	}
   961  	if !success {
   962  		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
   963  		dumpgstatus(gp)
   964  		throw("casfrom_Gscanstatus: gp->status is not in scan state")
   965  	}
   966  	releaseLockRank(lockRankGscan)
   967  }
   968  
   969  // This will return false if the gp is not in the expected status and the cas fails.
   970  // This acts like a lock acquire while the casfromgstatus acts like a lock release.
   971  func castogscanstatus(gp *g, oldval, newval uint32) bool {
   972  	switch oldval {
   973  	case _Grunnable,
   974  		_Grunning,
   975  		_Gwaiting,
   976  		_Gsyscall:
   977  		if newval == oldval|_Gscan {
   978  			r := gp.atomicstatus.CompareAndSwap(oldval, newval)
   979  			if r {
   980  				acquireLockRank(lockRankGscan)
   981  			}
   982  			return r
   983  
   984  		}
   985  	}
   986  	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
   987  	throw("castogscanstatus")
   988  	panic("not reached")
   989  }
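// castogscanstatus and casfrom_Gscanstatus are used as an acquire/release pair:
// code that needs exclusive access to a G (for example, to scan its stack)
// locks the status by setting the _Gscan bit and unlocks it afterwards. A
// minimal sketch of that pairing (error handling elided):
//
//	if castogscanstatus(gp, _Grunnable, _Gscanrunnable) {
//		// ... operate on gp while the _Gscan bit blocks other transitions ...
//		casfrom_Gscanstatus(gp, _Gscanrunnable, _Grunnable)
//	}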
   990  
   991  // casgstatusAlwaysTrack is a debug flag that causes casgstatus to always track
   992  // various latencies on every transition instead of sampling them.
   993  var casgstatusAlwaysTrack = false
   994  
    995  // If asked to move to or from a Gscanstatus this will throw. Use castogscanstatus
   996  // and casfrom_Gscanstatus instead.
   997  // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
   998  // put it in the Gscan state is finished.
   999  //
  1000  //go:nosplit
  1001  func casgstatus(gp *g, oldval, newval uint32) {
  1002  	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
  1003  		systemstack(func() {
  1004  			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
  1005  			throw("casgstatus: bad incoming values")
  1006  		})
  1007  	}
  1008  
  1009  	acquireLockRank(lockRankGscan)
  1010  	releaseLockRank(lockRankGscan)
  1011  
  1012  	// See https://golang.org/cl/21503 for justification of the yield delay.
  1013  	const yieldDelay = 5 * 1000
  1014  	var nextYield int64
  1015  
  1016  	// loop if gp->atomicstatus is in a scan state giving
  1017  	// GC time to finish and change the state to oldval.
  1018  	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
  1019  		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
  1020  			throw("casgstatus: waiting for Gwaiting but is Grunnable")
  1021  		}
  1022  		if i == 0 {
  1023  			nextYield = nanotime() + yieldDelay
  1024  		}
  1025  		if nanotime() < nextYield {
  1026  			for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
  1027  				procyield(1)
  1028  			}
  1029  		} else {
  1030  			osyield()
  1031  			nextYield = nanotime() + yieldDelay/2
  1032  		}
  1033  	}
  1034  
  1035  	if oldval == _Grunning {
  1036  		// Track every gTrackingPeriod time a goroutine transitions out of running.
  1037  		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
  1038  			gp.tracking = true
  1039  		}
  1040  		gp.trackingSeq++
  1041  	}
  1042  	if !gp.tracking {
  1043  		return
  1044  	}
  1045  
  1046  	// Handle various kinds of tracking.
  1047  	//
  1048  	// Currently:
  1049  	// - Time spent in runnable.
  1050  	// - Time spent blocked on a sync.Mutex or sync.RWMutex.
  1051  	switch oldval {
  1052  	case _Grunnable:
  1053  		// We transitioned out of runnable, so measure how much
  1054  		// time we spent in this state and add it to
  1055  		// runnableTime.
  1056  		now := nanotime()
  1057  		gp.runnableTime += now - gp.trackingStamp
  1058  		gp.trackingStamp = 0
  1059  	case _Gwaiting:
  1060  		if !gp.waitreason.isMutexWait() {
  1061  			// Not blocking on a lock.
  1062  			break
  1063  		}
  1064  		// Blocking on a lock, measure it. Note that because we're
  1065  		// sampling, we have to multiply by our sampling period to get
  1066  		// a more representative estimate of the absolute value.
  1067  		// gTrackingPeriod also represents an accurate sampling period
  1068  		// because we can only enter this state from _Grunning.
  1069  		now := nanotime()
  1070  		sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
  1071  		gp.trackingStamp = 0
  1072  	}
  1073  	switch newval {
  1074  	case _Gwaiting:
  1075  		if !gp.waitreason.isMutexWait() {
  1076  			// Not blocking on a lock.
  1077  			break
  1078  		}
  1079  		// Blocking on a lock. Write down the timestamp.
  1080  		now := nanotime()
  1081  		gp.trackingStamp = now
  1082  	case _Grunnable:
  1083  		// We just transitioned into runnable, so record what
  1084  		// time that happened.
  1085  		now := nanotime()
  1086  		gp.trackingStamp = now
  1087  	case _Grunning:
  1088  		// We're transitioning into running, so turn off
  1089  		// tracking and record how much time we spent in
  1090  		// runnable.
  1091  		gp.tracking = false
  1092  		sched.timeToRun.record(gp.runnableTime)
  1093  		gp.runnableTime = 0
  1094  	}
  1095  }
  1096  
  1097  // casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
  1098  //
  1099  // Use this over casgstatus when possible to ensure that a waitreason is set.
  1100  func casGToWaiting(gp *g, old uint32, reason waitReason) {
  1101  	// Set the wait reason before calling casgstatus, because casgstatus will use it.
  1102  	gp.waitreason = reason
  1103  	casgstatus(gp, old, _Gwaiting)
  1104  }
  1105  
  1106  // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
  1107  // Returns old status. Cannot call casgstatus directly, because we are racing with an
  1108  // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
  1109  // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
  1110  // it would loop waiting for the status to go back to Gwaiting, which it never will.
  1111  //
  1112  //go:nosplit
  1113  func casgcopystack(gp *g) uint32 {
  1114  	for {
  1115  		oldstatus := readgstatus(gp) &^ _Gscan
  1116  		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
  1117  			throw("copystack: bad status, not Gwaiting or Grunnable")
  1118  		}
  1119  		if gp.atomicstatus.CompareAndSwap(oldstatus, _Gcopystack) {
  1120  			return oldstatus
  1121  		}
  1122  	}
  1123  }
  1124  
  1125  // casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
  1126  //
  1127  // TODO(austin): This is the only status operation that both changes
  1128  // the status and locks the _Gscan bit. Rethink this.
  1129  func casGToPreemptScan(gp *g, old, new uint32) {
  1130  	if old != _Grunning || new != _Gscan|_Gpreempted {
  1131  		throw("bad g transition")
  1132  	}
  1133  	acquireLockRank(lockRankGscan)
  1134  	for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
  1135  	}
  1136  }
  1137  
  1138  // casGFromPreempted attempts to transition gp from _Gpreempted to
  1139  // _Gwaiting. If successful, the caller is responsible for
  1140  // re-scheduling gp.
  1141  func casGFromPreempted(gp *g, old, new uint32) bool {
  1142  	if old != _Gpreempted || new != _Gwaiting {
  1143  		throw("bad g transition")
  1144  	}
  1145  	gp.waitreason = waitReasonPreempted
  1146  	return gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting)
  1147  }
  1148  
  1149  // stopTheWorld stops all P's from executing goroutines, interrupting
   1150  // all goroutines at GC safe points and recording reason as the reason
  1151  // for the stop. On return, only the current goroutine's P is running.
  1152  // stopTheWorld must not be called from a system stack and the caller
  1153  // must not hold worldsema. The caller must call startTheWorld when
  1154  // other P's should resume execution.
  1155  //
  1156  // stopTheWorld is safe for multiple goroutines to call at the
  1157  // same time. Each will execute its own stop, and the stops will
  1158  // be serialized.
  1159  //
  1160  // This is also used by routines that do stack dumps. If the system is
  1161  // in panic or being exited, this may not reliably stop all
  1162  // goroutines.
  1163  func stopTheWorld(reason string) {
  1164  	semacquire(&worldsema)
  1165  	gp := getg()
  1166  	gp.m.preemptoff = reason
  1167  	systemstack(func() {
  1168  		// Mark the goroutine which called stopTheWorld preemptible so its
  1169  		// stack may be scanned.
  1170  		// This lets a mark worker scan us while we try to stop the world
  1171  		// since otherwise we could get in a mutual preemption deadlock.
  1172  		// We must not modify anything on the G stack because a stack shrink
  1173  		// may occur. A stack shrink is otherwise OK though because in order
  1174  		// to return from this function (and to leave the system stack) we
  1175  		// must have preempted all goroutines, including any attempting
  1176  		// to scan our stack, in which case, any stack shrinking will
  1177  		// have already completed by the time we exit.
  1178  		// Don't provide a wait reason because we're still executing.
  1179  		casGToWaiting(gp, _Grunning, waitReasonStoppingTheWorld)
  1180  		stopTheWorldWithSema()
  1181  		casgstatus(gp, _Gwaiting, _Grunning)
  1182  	})
  1183  }
  1184  
  1185  // startTheWorld undoes the effects of stopTheWorld.
  1186  func startTheWorld() {
  1187  	systemstack(func() { startTheWorldWithSema(false) })
  1188  
  1189  	// worldsema must be held over startTheWorldWithSema to ensure
  1190  	// gomaxprocs cannot change while worldsema is held.
  1191  	//
  1192  	// Release worldsema with direct handoff to the next waiter, but
  1193  	// acquirem so that semrelease1 doesn't try to yield our time.
  1194  	//
  1195  	// Otherwise if e.g. ReadMemStats is being called in a loop,
  1196  	// it might stomp on other attempts to stop the world, such as
  1197  	// for starting or ending GC. The operation this blocks is
  1198  	// so heavy-weight that we should just try to be as fair as
  1199  	// possible here.
  1200  	//
  1201  	// We don't want to just allow us to get preempted between now
  1202  	// and releasing the semaphore because then we keep everyone
  1203  	// (including, for example, GCs) waiting longer.
  1204  	mp := acquirem()
  1205  	mp.preemptoff = ""
  1206  	semrelease1(&worldsema, true, 0)
  1207  	releasem(mp)
  1208  }
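// Runtime code uses stopTheWorld and startTheWorld strictly as a bracket, for
// example (a simplified sketch of the pattern used by ReadMemStats and similar
// operations):
//
//	stopTheWorld("read mem stats")
//	// ... inspect or update state that requires a quiescent world ...
//	startTheWorld()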
  1209  
  1210  // stopTheWorldGC has the same effect as stopTheWorld, but blocks
  1211  // until the GC is not running. It also blocks a GC from starting
  1212  // until startTheWorldGC is called.
  1213  func stopTheWorldGC(reason string) {
  1214  	semacquire(&gcsema)
  1215  	stopTheWorld(reason)
  1216  }
  1217  
  1218  // startTheWorldGC undoes the effects of stopTheWorldGC.
  1219  func startTheWorldGC() {
  1220  	startTheWorld()
  1221  	semrelease(&gcsema)
  1222  }
  1223  
  1224  // Holding worldsema grants an M the right to try to stop the world.
  1225  var worldsema uint32 = 1
  1226  
  1227  // Holding gcsema grants the M the right to block a GC, and blocks
  1228  // until the current GC is done. In particular, it prevents gomaxprocs
  1229  // from changing concurrently.
  1230  //
  1231  // TODO(mknyszek): Once gomaxprocs and the execution tracer can handle
  1232  // being changed/enabled during a GC, remove this.
  1233  var gcsema uint32 = 1
  1234  
  1235  // stopTheWorldWithSema is the core implementation of stopTheWorld.
  1236  // The caller is responsible for acquiring worldsema and disabling
   1237  // preemption first, and then should call stopTheWorldWithSema on the system
  1238  // stack:
  1239  //
  1240  //	semacquire(&worldsema, 0)
  1241  //	m.preemptoff = "reason"
  1242  //	systemstack(stopTheWorldWithSema)
  1243  //
  1244  // When finished, the caller must either call startTheWorld or undo
  1245  // these three operations separately:
  1246  //
  1247  //	m.preemptoff = ""
  1248  //	systemstack(startTheWorldWithSema)
  1249  //	semrelease(&worldsema)
  1250  //
  1251  // It is allowed to acquire worldsema once and then execute multiple
  1252  // startTheWorldWithSema/stopTheWorldWithSema pairs.
  1253  // Other P's are able to execute between successive calls to
  1254  // startTheWorldWithSema and stopTheWorldWithSema.
  1255  // Holding worldsema causes any other goroutines invoking
  1256  // stopTheWorld to block.
  1257  func stopTheWorldWithSema() {
  1258  	gp := getg()
  1259  
  1260  	// If we hold a lock, then we won't be able to stop another M
  1261  	// that is blocked trying to acquire the lock.
  1262  	if gp.m.locks > 0 {
  1263  		throw("stopTheWorld: holding locks")
  1264  	}
  1265  
  1266  	lock(&sched.lock)
  1267  	sched.stopwait = gomaxprocs
  1268  	sched.gcwaiting.Store(true)
  1269  	preemptall()
  1270  	// stop current P
  1271  	gp.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
  1272  	sched.stopwait--
  1273  	// try to retake all P's in Psyscall status
  1274  	for _, pp := range allp {
  1275  		s := pp.status
  1276  		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
  1277  			if traceEnabled() {
  1278  				traceGoSysBlock(pp)
  1279  				traceProcStop(pp)
  1280  			}
  1281  			pp.syscalltick++
  1282  			sched.stopwait--
  1283  		}
  1284  	}
  1285  	// stop idle P's
  1286  	now := nanotime()
  1287  	for {
  1288  		pp, _ := pidleget(now)
  1289  		if pp == nil {
  1290  			break
  1291  		}
  1292  		pp.status = _Pgcstop
  1293  		sched.stopwait--
  1294  	}
  1295  	wait := sched.stopwait > 0
  1296  	unlock(&sched.lock)
  1297  
  1298  	// wait for remaining P's to stop voluntarily
  1299  	if wait {
  1300  		for {
  1301  			// wait for 100us, then try to re-preempt in case of any races
  1302  			if notetsleep(&sched.stopnote, 100*1000) {
  1303  				noteclear(&sched.stopnote)
  1304  				break
  1305  			}
  1306  			preemptall()
  1307  		}
  1308  	}
  1309  
  1310  	// sanity checks
  1311  	bad := ""
  1312  	if sched.stopwait != 0 {
  1313  		bad = "stopTheWorld: not stopped (stopwait != 0)"
  1314  	} else {
  1315  		for _, pp := range allp {
  1316  			if pp.status != _Pgcstop {
  1317  				bad = "stopTheWorld: not stopped (status != _Pgcstop)"
  1318  			}
  1319  		}
  1320  	}
  1321  	if freezing.Load() {
  1322  		// Some other thread is panicking. This can cause the
  1323  		// sanity checks above to fail if the panic happens in
  1324  		// the signal handler on a stopped thread. Either way,
  1325  		// we should halt this thread.
  1326  		lock(&deadlock)
  1327  		lock(&deadlock)
  1328  	}
  1329  	if bad != "" {
  1330  		throw(bad)
  1331  	}
  1332  
  1333  	worldStopped()
  1334  }
  1335  
  1336  func startTheWorldWithSema(emitTraceEvent bool) int64 {
  1337  	assertWorldStopped()
  1338  
  1339  	mp := acquirem() // disable preemption because it can be holding p in a local var
  1340  	if netpollinited() {
  1341  		list := netpoll(0) // non-blocking
  1342  		injectglist(&list)
  1343  	}
  1344  	lock(&sched.lock)
  1345  
  1346  	procs := gomaxprocs
  1347  	if newprocs != 0 {
  1348  		procs = newprocs
  1349  		newprocs = 0
  1350  	}
  1351  	p1 := procresize(procs)
  1352  	sched.gcwaiting.Store(false)
  1353  	if sched.sysmonwait.Load() {
  1354  		sched.sysmonwait.Store(false)
  1355  		notewakeup(&sched.sysmonnote)
  1356  	}
  1357  	unlock(&sched.lock)
  1358  
  1359  	worldStarted()
  1360  
  1361  	for p1 != nil {
  1362  		p := p1
  1363  		p1 = p1.link.ptr()
  1364  		if p.m != 0 {
  1365  			mp := p.m.ptr()
  1366  			p.m = 0
  1367  			if mp.nextp != 0 {
  1368  				throw("startTheWorld: inconsistent mp->nextp")
  1369  			}
  1370  			mp.nextp.set(p)
  1371  			notewakeup(&mp.park)
  1372  		} else {
  1373  			// Start M to run P.  Do not start another M below.
  1374  			newm(nil, p, -1)
  1375  		}
  1376  	}
  1377  
  1378  	// Capture start-the-world time before doing clean-up tasks.
  1379  	startTime := nanotime()
  1380  	if emitTraceEvent {
  1381  		traceGCSTWDone()
  1382  	}
  1383  
   1384  	// Wake up an additional proc in case we have excessive runnable goroutines
  1385  	// in local queues or in the global queue. If we don't, the proc will park itself.
  1386  	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
  1387  	wakep()
  1388  
  1389  	releasem(mp)
  1390  
  1391  	return startTime
  1392  }
  1393  
  1394  // usesLibcall indicates whether this runtime performs system calls
  1395  // via libcall.
  1396  func usesLibcall() bool {
  1397  	switch GOOS {
  1398  	case "aix", "darwin", "illumos", "ios", "solaris", "windows":
  1399  		return true
  1400  	case "openbsd":
  1401  		return GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64"
  1402  	}
  1403  	return false
  1404  }
  1405  
  1406  // mStackIsSystemAllocated indicates whether this runtime starts on a
  1407  // system-allocated stack.
  1408  func mStackIsSystemAllocated() bool {
  1409  	switch GOOS {
  1410  	case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
  1411  		return true
  1412  	case "openbsd":
  1413  		switch GOARCH {
  1414  		case "386", "amd64", "arm", "arm64":
  1415  			return true
  1416  		}
  1417  	}
  1418  	return false
  1419  }
  1420  
  1421  // mstart is the entry-point for new Ms.
  1422  // It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
  1423  func mstart()
  1424  
  1425  // mstart0 is the Go entry-point for new Ms.
  1426  // This must not split the stack because we may not even have stack
  1427  // bounds set up yet.
  1428  //
  1429  // May run during STW (because it doesn't have a P yet), so write
  1430  // barriers are not allowed.
  1431  //
  1432  //go:nosplit
  1433  //go:nowritebarrierrec
  1434  func mstart0() {
  1435  	gp := getg()
  1436  
  1437  	osStack := gp.stack.lo == 0
  1438  	if osStack {
  1439  		// Initialize stack bounds from system stack.
  1440  		// Cgo may have left stack size in stack.hi.
  1441  		// minit may update the stack bounds.
  1442  		//
  1443  		// Note: these bounds may not be very accurate.
  1444  		// We set hi to &size, but there are things above
  1445  		// it. The 1024 is supposed to compensate this,
   1446  		// it. The 1024 is supposed to compensate for this,
  1447  		size := gp.stack.hi
  1448  		if size == 0 {
  1449  			size = 8192 * sys.StackGuardMultiplier
  1450  		}
  1451  		gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
  1452  		gp.stack.lo = gp.stack.hi - size + 1024
  1453  	}
  1454  	// Initialize stack guard so that we can start calling regular
  1455  	// Go code.
  1456  	gp.stackguard0 = gp.stack.lo + stackGuard
  1457  	// This is the g0, so we can also call go:systemstack
  1458  	// functions, which check stackguard1.
  1459  	gp.stackguard1 = gp.stackguard0
  1460  	mstart1()
  1461  
  1462  	// Exit this thread.
  1463  	if mStackIsSystemAllocated() {
  1464  		// Windows, Solaris, illumos, Darwin, AIX and Plan 9 always system-allocate
  1465  		// the stack, but put it in gp.stack before mstart,
  1466  		// so the logic above hasn't set osStack yet.
  1467  		osStack = true
  1468  	}
  1469  	mexit(osStack)
  1470  }
  1471  
   1472  // The go:noinline is to guarantee that the getcallerpc/getcallersp below are safe,
  1473  // so that we can set up g0.sched to return to the call of mstart1 above.
  1474  //
  1475  //go:noinline
  1476  func mstart1() {
  1477  	gp := getg()
  1478  
  1479  	if gp != gp.m.g0 {
  1480  		throw("bad runtime·mstart")
  1481  	}
  1482  
  1483  	// Set up m.g0.sched as a label returning to just
  1484  	// after the mstart1 call in mstart0 above, for use by goexit0 and mcall.
  1485  	// We're never coming back to mstart1 after we call schedule,
  1486  	// so other calls can reuse the current frame.
  1487  	// And goexit0 does a gogo that needs to return from mstart1
  1488  	// and let mstart0 exit the thread.
  1489  	gp.sched.g = guintptr(unsafe.Pointer(gp))
  1490  	gp.sched.pc = getcallerpc()
  1491  	gp.sched.sp = getcallersp()
  1492  
  1493  	asminit()
  1494  	minit()
  1495  
  1496  	// Install signal handlers; after minit so that minit can
  1497  	// prepare the thread to be able to handle the signals.
  1498  	if gp.m == &m0 {
  1499  		mstartm0()
  1500  	}
  1501  
  1502  	if fn := gp.m.mstartfn; fn != nil {
  1503  		fn()
  1504  	}
  1505  
  1506  	if gp.m != &m0 {
  1507  		acquirep(gp.m.nextp.ptr())
  1508  		gp.m.nextp = 0
  1509  	}
  1510  	schedule()
  1511  }
  1512  
  1513  // mstartm0 implements part of mstart1 that only runs on the m0.
  1514  //
  1515  // Write barriers are allowed here because we know the GC can't be
  1516  // running yet, so they'll be no-ops.
  1517  //
  1518  //go:yeswritebarrierrec
  1519  func mstartm0() {
  1520  	// Create an extra M for callbacks on threads not created by Go.
  1521  	// An extra M is also needed on Windows for callbacks created by
  1522  	// syscall.NewCallback. See issue #6751 for details.
  1523  	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
  1524  		cgoHasExtraM = true
  1525  		newextram()
  1526  	}
  1527  	initsig(false)
  1528  }
  1529  
  1530  // mPark causes a thread to park itself, returning once woken.
  1531  //
  1532  //go:nosplit
  1533  func mPark() {
  1534  	gp := getg()
  1535  	notesleep(&gp.m.park)
  1536  	noteclear(&gp.m.park)
  1537  }
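
        // mPark itself only sleeps on the m.park note; it does not make the M
        // findable. Callers publish the M first and rely on some other thread to
        // wake it with notewakeup(&mp.park). A minimal sketch of that pairing,
        // modeled on stopm and startm later in this file:
        //
        //	lock(&sched.lock)
        //	mput(getg().m)       // park side: make the M visible on the idle list
        //	unlock(&sched.lock)
        //	mPark()              // sleeps until a startm does notewakeup(&mp.park)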
  1538  
  1539  // mexit tears down and exits the current thread.
  1540  //
  1541  // Don't call this directly to exit the thread, since it must run at
  1542  // the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
  1543  // unwind the stack to the point that exits the thread.
  1544  //
  1545  // It is entered with m.p != nil, so write barriers are allowed. It
  1546  // will release the P before exiting.
  1547  //
  1548  //go:yeswritebarrierrec
  1549  func mexit(osStack bool) {
  1550  	mp := getg().m
  1551  
  1552  	if mp == &m0 {
  1553  		// This is the main thread. Just wedge it.
  1554  		//
  1555  		// On Linux, exiting the main thread puts the process
  1556  		// into a non-waitable zombie state. On Plan 9,
  1557  		// exiting the main thread unblocks wait even though
  1558  		// other threads are still running. On Solaris we can
  1559  		// neither exitThread nor return from mstart. Other
  1560  		// bad things probably happen on other platforms.
  1561  		//
  1562  		// We could try to clean up this M more before wedging
  1563  		// it, but that complicates signal handling.
  1564  		handoffp(releasep())
  1565  		lock(&sched.lock)
  1566  		sched.nmfreed++
  1567  		checkdead()
  1568  		unlock(&sched.lock)
  1569  		mPark()
  1570  		throw("locked m0 woke up")
  1571  	}
  1572  
  1573  	sigblock(true)
  1574  	unminit()
  1575  
  1576  	// Free the gsignal stack.
  1577  	if mp.gsignal != nil {
  1578  		stackfree(mp.gsignal.stack)
  1579  		// On some platforms, when calling into VDSO (e.g. nanotime)
  1580  		// we store our g on the gsignal stack, if there is one.
  1581  		// Now the stack is freed, unlink it from the m, so we
  1582  		// won't write to it when calling VDSO code.
  1583  		mp.gsignal = nil
  1584  	}
  1585  
  1586  	// Remove m from allm.
  1587  	lock(&sched.lock)
  1588  	for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
  1589  		if *pprev == mp {
  1590  			*pprev = mp.alllink
  1591  			goto found
  1592  		}
  1593  	}
  1594  	throw("m not found in allm")
  1595  found:
  1596  	// Delay reaping m until it's done with the stack.
  1597  	//
  1598  	// Put mp on the free list, though it will not be reaped while freeWait
  1599  	// is freeMWait. mp is no longer reachable via allm, so even if it is
  1600  	// on an OS stack, we must keep a reference to mp alive so that the GC
  1601  	// doesn't free mp while we are still using it.
  1602  	//
  1603  	// Note that the free list must not be linked through alllink because
  1604  	// some functions walk allm without locking, so may be using alllink.
  1605  	mp.freeWait.Store(freeMWait)
  1606  	mp.freelink = sched.freem
  1607  	sched.freem = mp
  1608  	unlock(&sched.lock)
  1609  
  1610  	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
  1611  
  1612  	// Release the P.
  1613  	handoffp(releasep())
  1614  	// After this point we must not have write barriers.
  1615  
  1616  	// Invoke the deadlock detector. This must happen after
  1617  	// handoffp because it may have started a new M to take our
  1618  	// P's work.
  1619  	lock(&sched.lock)
  1620  	sched.nmfreed++
  1621  	checkdead()
  1622  	unlock(&sched.lock)
  1623  
  1624  	if GOOS == "darwin" || GOOS == "ios" {
  1625  		// Make sure pendingPreemptSignals is correct when an M exits.
  1626  		// For #41702.
  1627  		if mp.signalPending.Load() != 0 {
  1628  			pendingPreemptSignals.Add(-1)
  1629  		}
  1630  	}
  1631  
  1632  	// Destroy all allocated resources. After this is called, we may no
  1633  	// longer take any locks.
  1634  	mdestroy(mp)
  1635  
  1636  	if osStack {
  1637  		// No more uses of mp, so it is safe to drop the reference.
  1638  		mp.freeWait.Store(freeMRef)
  1639  
  1640  		// Return from mstart and let the system thread
  1641  		// library free the g0 stack and terminate the thread.
  1642  		return
  1643  	}
  1644  
  1645  	// mstart is the thread's entry point, so there's nothing to
  1646  	// return to. Exit the thread directly. exitThread will clear
  1647  	// m.freeWait when it's done with the stack and the m can be
  1648  	// reaped.
  1649  	exitThread(&mp.freeWait)
  1650  }
  1651  
  1652  // forEachP calls fn(p) for every P p when p reaches a GC safe point.
  1653  // If a P is currently executing code, this will bring the P to a GC
  1654  // safe point and execute fn on that P. If the P is not executing code
  1655  // (it is idle or in a syscall), this will call fn(p) directly while
  1656  // preventing the P from exiting its state. This does not ensure that
  1657  // fn will run on every CPU executing Go code, but it acts as a global
  1658  // memory barrier. GC uses this as a "ragged barrier."
  1659  //
  1660  // The caller must hold worldsema.
  1661  //
  1662  //go:systemstack
  1663  func forEachP(fn func(*p)) {
  1664  	mp := acquirem()
  1665  	pp := getg().m.p.ptr()
  1666  
  1667  	lock(&sched.lock)
  1668  	if sched.safePointWait != 0 {
  1669  		throw("forEachP: sched.safePointWait != 0")
  1670  	}
  1671  	sched.safePointWait = gomaxprocs - 1
  1672  	sched.safePointFn = fn
  1673  
  1674  	// Ask all Ps to run the safe point function.
  1675  	for _, p2 := range allp {
  1676  		if p2 != pp {
  1677  			atomic.Store(&p2.runSafePointFn, 1)
  1678  		}
  1679  	}
  1680  	preemptall()
  1681  
  1682  	// Any P entering _Pidle or _Psyscall from now on will observe
  1683  	// p.runSafePointFn == 1 and will call runSafePointFn when
  1684  	// changing its status to _Pidle/_Psyscall.
  1685  
  1686  	// Run safe point function for all idle Ps. sched.pidle will
  1687  	// not change because we hold sched.lock.
  1688  	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
  1689  		if atomic.Cas(&p.runSafePointFn, 1, 0) {
  1690  			fn(p)
  1691  			sched.safePointWait--
  1692  		}
  1693  	}
  1694  
  1695  	wait := sched.safePointWait > 0
  1696  	unlock(&sched.lock)
  1697  
  1698  	// Run fn for the current P.
  1699  	fn(pp)
  1700  
  1701  	// Force Ps currently in _Psyscall into _Pidle and hand them
  1702  	// off to induce safe point function execution.
  1703  	for _, p2 := range allp {
  1704  		s := p2.status
  1705  		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
  1706  			if traceEnabled() {
  1707  				traceGoSysBlock(p2)
  1708  				traceProcStop(p2)
  1709  			}
  1710  			p2.syscalltick++
  1711  			handoffp(p2)
  1712  		}
  1713  	}
  1714  
  1715  	// Wait for remaining Ps to run fn.
  1716  	if wait {
  1717  		for {
  1718  			// Wait for 100us, then try to re-preempt in
  1719  			// case of any races.
  1720  			//
  1721  			// Requires system stack.
  1722  			if notetsleep(&sched.safePointNote, 100*1000) {
  1723  				noteclear(&sched.safePointNote)
  1724  				break
  1725  			}
  1726  			preemptall()
  1727  		}
  1728  	}
  1729  	if sched.safePointWait != 0 {
  1730  		throw("forEachP: not done")
  1731  	}
  1732  	for _, p2 := range allp {
  1733  		if p2.runSafePointFn != 0 {
  1734  			throw("forEachP: P did not run fn")
  1735  		}
  1736  	}
  1737  
  1738  	lock(&sched.lock)
  1739  	sched.safePointFn = nil
  1740  	unlock(&sched.lock)
  1741  	releasem(mp)
  1742  }
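
        // A typical invocation of forEachP, sketched here for illustration: the
        // caller must hold worldsema and, per the go:systemstack annotation above,
        // run on the system stack.
        //
        //	systemstack(func() {
        //		forEachP(func(pp *p) {
        //			// pp is at a GC safe point here.
        //		})
        //	})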
  1743  
  1744  // runSafePointFn runs the safe point function, if any, for this P.
  1745  // This should be called like
  1746  //
  1747  //	if getg().m.p.runSafePointFn != 0 {
  1748  //	    runSafePointFn()
  1749  //	}
  1750  //
  1751  // runSafePointFn must be checked on any transition in to _Pidle or
  1752  // _Psyscall to avoid a race where forEachP sees that the P is running
  1753  // just before the P goes into _Pidle/_Psyscall and neither forEachP
  1754  // nor the P run the safe-point function.
  1755  func runSafePointFn() {
  1756  	p := getg().m.p.ptr()
  1757  	// Resolve the race between forEachP running the safe-point
  1758  	// function on this P's behalf and this P running the
  1759  	// safe-point function directly.
  1760  	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
  1761  		return
  1762  	}
  1763  	sched.safePointFn(p)
  1764  	lock(&sched.lock)
  1765  	sched.safePointWait--
  1766  	if sched.safePointWait == 0 {
  1767  		notewakeup(&sched.safePointNote)
  1768  	}
  1769  	unlock(&sched.lock)
  1770  }
  1771  
  1772  // When running with cgo, we call _cgo_thread_start
  1773  // to start threads for us so that we can play nicely with
  1774  // foreign code.
  1775  var cgoThreadStart unsafe.Pointer
  1776  
  1777  type cgothreadstart struct {
  1778  	g   guintptr
  1779  	tls *uint64
  1780  	fn  unsafe.Pointer
  1781  }
  1782  
  1783  // Allocate a new m unassociated with any thread.
  1784  // Can use p for allocation context if needed.
  1785  // fn is recorded as the new m's m.mstartfn.
  1786  // id is optional pre-allocated m ID. Omit by passing -1.
  1787  //
  1788  // This function is allowed to have write barriers even if the caller
  1789  // isn't because it borrows pp.
  1790  //
  1791  //go:yeswritebarrierrec
  1792  func allocm(pp *p, fn func(), id int64) *m {
  1793  	allocmLock.rlock()
  1794  
  1795  	// The caller owns pp, but we may borrow (i.e., acquirep) it. We must
  1796  	// disable preemption to ensure it is not stolen, which would make the
  1797  	// caller lose ownership.
  1798  	acquirem()
  1799  
  1800  	gp := getg()
  1801  	if gp.m.p == 0 {
  1802  		acquirep(pp) // temporarily borrow p for mallocs in this function
  1803  	}
  1804  
  1805  	// Release the free M list. We need to do this somewhere and
  1806  	// this may free up a stack we can use.
  1807  	if sched.freem != nil {
  1808  		lock(&sched.lock)
  1809  		var newList *m
  1810  		for freem := sched.freem; freem != nil; {
  1811  			wait := freem.freeWait.Load()
  1812  			if wait == freeMWait {
  1813  				next := freem.freelink
  1814  				freem.freelink = newList
  1815  				newList = freem
  1816  				freem = next
  1817  				continue
  1818  			}
  1819  			// Free the stack if needed. For freeMRef, there is
  1820  			// nothing to do except drop freem from the sched.freem
  1821  			// list.
  1822  			if wait == freeMStack {
  1823  				// stackfree must be on the system stack, but allocm is
  1824  				// reachable off the system stack transitively from
  1825  				// startm.
  1826  				systemstack(func() {
  1827  					stackfree(freem.g0.stack)
  1828  				})
  1829  			}
  1830  			freem = freem.freelink
  1831  		}
  1832  		sched.freem = newList
  1833  		unlock(&sched.lock)
  1834  	}
  1835  
  1836  	mp := new(m)
  1837  	mp.mstartfn = fn
  1838  	mcommoninit(mp, id)
  1839  
  1840  	// In case of cgo or Solaris or illumos or Darwin, pthread_create will make us a stack.
  1841  	// Windows and Plan 9 will lay out the sched stack on the OS stack.
  1842  	if iscgo || mStackIsSystemAllocated() {
  1843  		mp.g0 = malg(-1)
  1844  	} else {
  1845  		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
  1846  	}
  1847  	mp.g0.m = mp
  1848  
  1849  	if pp == gp.m.p.ptr() {
  1850  		releasep()
  1851  	}
  1852  
  1853  	releasem(gp.m)
  1854  	allocmLock.runlock()
  1855  	return mp
  1856  }
  1857  
  1858  // needm is called when a cgo callback happens on a
  1859  // thread without an m (a thread not created by Go).
  1860  // In this case, needm is expected to find an m to use
  1861  // and return with m, g initialized correctly.
  1862  // Since m and g are not set now (likely nil, but see below)
  1863  // needm is limited in what routines it can call. In particular
  1864  // it can only call nosplit functions (textflag 7) and cannot
  1865  // do any scheduling that requires an m.
  1866  //
  1867  // In order to avoid needing heavy lifting here, we adopt
  1868  // the following strategy: there is a stack of available m's
  1869  // that can be stolen. Using compare-and-swap
  1870  // to pop from the stack has ABA races, so we simulate
  1871  // a lock by doing an exchange (via Casuintptr) to steal the stack
  1872  // head and replace the top pointer with MLOCKED (1).
  1873  // This serves as a simple spin lock that we can use even
  1874  // without an m. The thread that locks the stack in this way
  1875  // unlocks the stack by storing a valid stack head pointer.
  1876  //
  1877  // In order to make sure that there is always an m structure
  1878  // available to be stolen, we maintain the invariant that there
  1879  // is always one more than needed. At the beginning of the
  1880  // program (if cgo is in use) the list is seeded with a single m.
  1881  // If needm finds that it has taken the last m off the list, its job
  1882  // is - once it has installed its own m so that it can do things like
  1883  // allocate memory - to create a spare m and put it on the list.
  1884  //
  1885  // Each of these extra m's also has a g0 and a curg that are
  1886  // pressed into service as the scheduling stack and current
  1887  // goroutine for the duration of the cgo callback.
  1888  //
  1889  // When the callback is done with the m, it calls dropm to
  1890  // put the m back on the list.
  1891  //
  1892  //go:nosplit
  1893  func needm() {
  1894  	if (iscgo || GOOS == "windows") && !cgoHasExtraM {
  1895  		// Can happen if C/C++ code calls Go from a global ctor.
  1896  		// Can also happen on Windows if a global ctor uses a
  1897  		// callback created by syscall.NewCallback. See issue #6751
  1898  		// for details.
  1899  		//
  1900  		// Cannot throw, because the scheduler is not initialized yet.
  1901  		writeErrStr("fatal error: cgo callback before cgo call\n")
  1902  		exit(1)
  1903  	}
  1904  
  1905  	// Save and block signals before getting an M.
  1906  	// The signal handler may call needm itself,
  1907  	// and we must avoid a deadlock. Also, once g is installed,
  1908  	// any incoming signals will try to execute,
  1909  	// but we won't have the sigaltstack settings and other data
  1910  	// set up appropriately until the end of minit, which will
  1911  	// unblock the signals. This is the same dance as when
  1912  	// starting a new m to run Go code via newosproc.
  1913  	var sigmask sigset
  1914  	sigsave(&sigmask)
  1915  	sigblock(false)
  1916  
  1917  	// nilokay=false is safe here because of the invariant above,
  1918  	// that the extra list always contains or will soon contain
  1919  	// at least one m.
  1920  	mp, last := getExtraM(false)
  1921  
  1922  	// Set needextram when we've just emptied the list,
  1923  	// so that the eventual call into cgocallbackg will
  1924  	// allocate a new m for the extra list. We delay the
  1925  	// allocation until then so that it can be done
  1926  	// after exitsyscall makes sure it is okay to be
  1927  	// running at all (that is, there's no garbage collection
  1928  	// running right now).
  1929  	mp.needextram = last
  1930  
  1931  	// Store the original signal mask for use by minit.
  1932  	mp.sigmask = sigmask
  1933  
  1934  	// Install TLS on some platforms (previously setg
  1935  	// would do this if necessary).
  1936  	osSetupTLS(mp)
  1937  
  1938  	// Install g (= m->g0) and set the stack bounds
  1939  	// to match the current stack. We don't actually know
  1940  	// how big the stack is, like we don't know how big any
  1941  	// scheduling stack is, but we assume there's at least 32 kB,
  1942  	// which is more than enough for us.
  1943  	setg(mp.g0)
  1944  	gp := getg()
  1945  	gp.stack.hi = getcallersp() + 1024
  1946  	gp.stack.lo = getcallersp() - 32*1024
  1947  	gp.stackguard0 = gp.stack.lo + stackGuard
  1948  
  1949  	// Initialize this thread to use the m.
  1950  	asminit()
  1951  	minit()
  1952  
  1953  	// mp.curg is now a real goroutine.
  1954  	casgstatus(mp.curg, _Gdead, _Gsyscall)
  1955  	sched.ngsys.Add(-1)
  1956  }
  1957  
  1958  // newextram allocates m's and puts them on the extra list.
  1959  // It is called with a working local m, so that it can do things
  1960  // like call schedlock and allocate.
  1961  func newextram() {
  1962  	c := extraMWaiters.Swap(0)
  1963  	if c > 0 {
  1964  		for i := uint32(0); i < c; i++ {
  1965  			oneNewExtraM()
  1966  		}
  1967  	} else if extraMLength.Load() == 0 {
  1968  		// Make sure there is at least one extra M.
  1969  		oneNewExtraM()
  1970  	}
  1971  }
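
        // This pairs with the waiter accounting in lockextra: a thread that finds
        // the extra list empty with nilokay=false increments extraMWaiters before
        // sleeping, and the next newextram call swaps that counter back to zero
        // and allocates one M per recorded waiter.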
  1972  
  1973  // oneNewExtraM allocates an m and puts it on the extra list.
  1974  func oneNewExtraM() {
  1975  	// Create extra goroutine locked to extra m.
  1976  	// The goroutine is the context in which the cgo callback will run.
  1977  	// The sched.pc will never be returned to, but setting it to
  1978  	// goexit makes clear to the traceback routines where
  1979  	// the goroutine stack ends.
  1980  	mp := allocm(nil, nil, -1)
  1981  	gp := malg(4096)
  1982  	gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
  1983  	gp.sched.sp = gp.stack.hi
  1984  	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
  1985  	gp.sched.lr = 0
  1986  	gp.sched.g = guintptr(unsafe.Pointer(gp))
  1987  	gp.syscallpc = gp.sched.pc
  1988  	gp.syscallsp = gp.sched.sp
  1989  	gp.stktopsp = gp.sched.sp
  1990  	// malg returns status as _Gidle. Change to _Gdead before
  1991  	// adding to allg where GC can see it. We use _Gdead to hide
  1992  	// this from tracebacks and stack scans since it isn't a
  1993  	// "real" goroutine until needm grabs it.
  1994  	casgstatus(gp, _Gidle, _Gdead)
  1995  	gp.m = mp
  1996  	mp.curg = gp
  1997  	mp.isextra = true
  1998  	mp.lockedInt++
  1999  	mp.lockedg.set(gp)
  2000  	gp.lockedm.set(mp)
  2001  	gp.goid = sched.goidgen.Add(1)
  2002  	gp.sysblocktraced = true
  2003  	if raceenabled {
  2004  		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
  2005  	}
  2006  	if traceEnabled() {
  2007  		// Trigger two trace events for the locked g in the extra m,
  2008  		// since the next event of the g will be traceEvGoSysExit in exitsyscall,
  2009  		// while calling from C thread to Go.
  2010  		traceGoCreate(gp, 0) // no start pc
  2011  		gp.traceseq++
  2012  		traceEvent(traceEvGoInSyscall, -1, gp.goid)
  2013  	}
  2014  	// put on allg for garbage collector
  2015  	allgadd(gp)
  2016  
  2017  	// gp is now on the allg list, but we don't want it to be
  2018  	// counted by gcount. It would be more "proper" to increment
  2019  	// sched.ngfree, but that requires locking. Incrementing ngsys
  2020  	// has the same effect.
  2021  	sched.ngsys.Add(1)
  2022  
  2023  	// Add m to the extra list.
  2024  	addExtraM(mp)
  2025  }
  2026  
  2027  // dropm is called when a cgo callback has called needm but is now
  2028  // done with the callback and returning back into the non-Go thread.
  2029  // done with the callback and returning to the non-Go thread.
  2030  //
  2031  // The main expense here is the call to signalstack to release the
  2032  // m's signal stack, and then the call to needm on the next callback
  2033  // from this thread. It is tempting to try to save the m for next time,
  2034  // which would eliminate both these costs, but there might not be
  2035  // a next time: the current thread (which Go does not control) might exit.
  2036  // If we saved the m for that thread, there would be an m leak each time
  2037  // such a thread exited. Instead, we acquire and release an m on each
  2038  // call. These should typically not be scheduling operations, just a few
  2039  // atomics, so the cost should be small.
  2040  //
  2041  // TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
  2042  // variable using pthread_key_create. Unlike the pthread keys we already use
  2043  // on OS X, this dummy key would never be read by Go code. It would exist
  2044  // only so that we could register a thread-exit-time destructor.
  2045  // That destructor would put the m back onto the extra list.
  2046  // This is purely a performance optimization. The current version,
  2047  // in which dropm happens on each cgo call, is still correct too.
  2048  // We may have to keep the current version on systems with cgo
  2049  // but without pthreads, like Windows.
  2050  func dropm() {
  2051  	// Clear m and g, and return m to the extra list.
  2052  	// After the call to setg we can only call nosplit functions
  2053  	// with no pointer manipulation.
  2054  	mp := getg().m
  2055  
  2056  	// Return mp.curg to dead state.
  2057  	casgstatus(mp.curg, _Gsyscall, _Gdead)
  2058  	mp.curg.preemptStop = false
  2059  	sched.ngsys.Add(1)
  2060  
  2061  	// Block signals before unminit.
  2062  	// Unminit unregisters the signal handling stack (but needs g on some systems).
  2063  	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
  2064  	// It's important not to try to handle a signal between those two steps.
  2065  	sigmask := mp.sigmask
  2066  	sigblock(false)
  2067  	unminit()
  2068  
  2069  	setg(nil)
  2070  
  2071  	putExtraM(mp)
  2072  
  2073  	msigrestore(sigmask)
  2074  }
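
        // Taken together, needm and dropm bracket a cgo callback on a thread not
        // created by Go: the callback entry path calls needm to borrow an extra M,
        // the Go code runs on that M's curg, and dropm returns the M to the extra
        // list before control returns to C. A rough sketch of the lifecycle:
        //
        //	needm() // install g0/curg of an extra M on this thread
        //	// ... run the Go callback on mp.curg ...
        //	dropm() // return the M; the C thread may exit at any time afterwards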
  2075  
  2076  // A helper function for EnsureDropM.
  2077  func getm() uintptr {
  2078  	return uintptr(unsafe.Pointer(getg().m))
  2079  }
  2080  
  2081  var (
  2082  	// Locking linked list of extra M's, via mp.schedlink. Must be accessed
  2083  	// only via lockextra/unlockextra.
  2084  	//
  2085  	// Can't be atomic.Pointer[m] because we use an invalid pointer as a
  2086  	// "locked" sentinel value. M's on this list remain visible to the GC
  2087  	// because their mp.curg is on allgs.
  2088  	extraM atomic.Uintptr
  2089  	// Number of M's in the extraM list.
  2090  	extraMLength atomic.Uint32
  2091  	// Number of waiters in lockextra.
  2092  	extraMWaiters atomic.Uint32
  2093  
  2094  	// Number of extra M's in use by threads.
  2095  	extraMInUse atomic.Uint32
  2096  )
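
        // The extraM head doubles as a spin lock: lockextra swaps in the sentinel
        // value 1 ("locked") and unlockextra releases it by publishing a new head.
        // A condensed sketch of a pop, following getExtraM below:
        //
        //	mp := lockextra(false)              // spin until the head is unlocked and non-nil
        //	unlockextra(mp.schedlink.ptr(), -1) // new head = next element; length -1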
  2097  
  2098  // lockextra locks the extra list and returns the list head.
  2099  // The caller must unlock the list by storing a new list head
  2100  // to extraM. If nilokay is true, then lockextra will
  2101  // return a nil list head if that's what it finds. If nilokay is false,
  2102  // lockextra will keep waiting until the list head is no longer nil.
  2103  //
  2104  //go:nosplit
  2105  func lockextra(nilokay bool) *m {
  2106  	const locked = 1
  2107  
  2108  	incr := false
  2109  	for {
  2110  		old := extraM.Load()
  2111  		if old == locked {
  2112  			osyield_no_g()
  2113  			continue
  2114  		}
  2115  		if old == 0 && !nilokay {
  2116  			if !incr {
  2117  				// Add 1 to the number of threads
  2118  				// waiting for an M.
  2119  				// This is cleared by newextram.
  2120  				extraMWaiters.Add(1)
  2121  				incr = true
  2122  			}
  2123  			usleep_no_g(1)
  2124  			continue
  2125  		}
  2126  		if extraM.CompareAndSwap(old, locked) {
  2127  			extraMInUse.Add(1)
  2128  			return (*m)(unsafe.Pointer(old))
  2129  		}
  2130  		osyield_no_g()
  2131  		continue
  2132  	}
  2133  }
  2134  
  2135  //go:nosplit
  2136  func unlockextra(mp *m, delta int32) {
  2137  	extraMLength.Add(delta)
  2138  	extraM.Store(uintptr(unsafe.Pointer(mp)))
  2139  }
  2140  
  2141  // Return an M from the extra M list. Returns last == true if the list becomes
  2142  // empty because of this call.
  2143  //
  2144  //go:nosplit
  2145  func getExtraM(nilokay bool) (mp *m, last bool) {
  2146  	mp = lockextra(nilokay)
  2147  	if mp == nil {
  2148  		unlockextra(nil, 0)
  2149  		return nil, true
  2150  	}
  2151  	unlockextra(mp.schedlink.ptr(), -1)
  2152  	return mp, mp.schedlink.ptr() == nil
  2153  }
  2154  
  2155  // Returns an extra M back to the list. mp must be from getExtraM. Newly
  2156  // allocated M's should use addExtraM.
  2157  //
  2158  //go:nosplit
  2159  func putExtraM(mp *m) {
  2160  	extraMInUse.Add(-1)
  2161  	addExtraM(mp)
  2162  }
  2163  
  2164  // Adds a newly allocated M to the extra M list.
  2165  //
  2166  //go:nosplit
  2167  func addExtraM(mp *m) {
  2168  	mnext := lockextra(true)
  2169  	mp.schedlink.set(mnext)
  2170  	unlockextra(mp, 1)
  2171  }
  2172  
  2173  var (
  2174  	// allocmLock is locked for read when creating new Ms in allocm and their
  2175  	// addition to allm. Thus acquiring this lock for write blocks the
  2176  	// creation of new Ms.
  2177  	allocmLock rwmutex
  2178  
  2179  	// execLock serializes exec and clone to avoid bugs or unspecified
  2180  	// behaviour around exec'ing while creating/destroying threads. See
  2181  	// issue #19546.
  2182  	execLock rwmutex
  2183  )
  2184  
  2185  // These errors are reported (via writeErrStr) by some OS-specific
  2186  // versions of newosproc and newosproc0.
  2187  const (
  2188  	failthreadcreate  = "runtime: failed to create new OS thread\n"
  2189  	failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
  2190  )
  2191  
  2192  // newmHandoff contains a list of m structures that need new OS threads.
  2193  // This is used by newm in situations where newm itself can't safely
  2194  // start an OS thread.
  2195  var newmHandoff struct {
  2196  	lock mutex
  2197  
  2198  	// newm points to a list of M structures that need new OS
  2199  	// threads. The list is linked through m.schedlink.
  2200  	newm muintptr
  2201  
  2202  	// waiting indicates that wake needs to be notified when an m
  2203  	// is put on the list.
  2204  	waiting bool
  2205  	wake    note
  2206  
  2207  	// haveTemplateThread indicates that the templateThread has
  2208  	// been started. This is not protected by lock. Use cas to set
  2209  	// to 1.
  2210  	haveTemplateThread uint32
  2211  }
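
        // The producer side of this handoff lives in newm below: it pushes the M
        // onto newmHandoff.newm and wakes the template thread if it is waiting.
        // Condensed from that code:
        //
        //	lock(&newmHandoff.lock)
        //	mp.schedlink = newmHandoff.newm
        //	newmHandoff.newm.set(mp)
        //	if newmHandoff.waiting {
        //		newmHandoff.waiting = false
        //		notewakeup(&newmHandoff.wake)
        //	}
        //	unlock(&newmHandoff.lock)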
  2212  
  2213  // Create a new m. It will start off with a call to fn, or else the scheduler.
  2214  // fn needs to be static and not a heap-allocated closure.
  2215  // May run with m.p==nil, so write barriers are not allowed.
  2216  //
  2217  // id is optional pre-allocated m ID. Omit by passing -1.
  2218  //
  2219  //go:nowritebarrierrec
  2220  func newm(fn func(), pp *p, id int64) {
  2221  	// allocm adds a new M to allm, but they do not start until created by
  2222  	// the OS in newm1 or the template thread.
  2223  	//
  2224  	// doAllThreadsSyscall requires that every M in allm will eventually
  2225  	// start and be signal-able, even with a STW.
  2226  	//
  2227  	// Disable preemption here until we start the thread to ensure that
  2228  	// newm is not preempted between allocm and starting the new thread,
  2229  	// ensuring that anything added to allm is guaranteed to eventually
  2230  	// start.
  2231  	acquirem()
  2232  
  2233  	mp := allocm(pp, fn, id)
  2234  	mp.nextp.set(pp)
  2235  	mp.sigmask = initSigmask
  2236  	if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
  2237  		// We're on a locked M or a thread that may have been
  2238  		// started by C. The kernel state of this thread may
  2239  		// be strange (the user may have locked it for that
  2240  		// purpose). We don't want to clone that into another
  2241  		// thread. Instead, ask a known-good thread to create
  2242  		// the thread for us.
  2243  		//
  2244  		// This is disabled on Plan 9. See golang.org/issue/22227.
  2245  		//
  2246  		// TODO: This may be unnecessary on Windows, which
  2247  		// doesn't model thread creation off fork.
  2248  		lock(&newmHandoff.lock)
  2249  		if newmHandoff.haveTemplateThread == 0 {
  2250  			throw("on a locked thread with no template thread")
  2251  		}
  2252  		mp.schedlink = newmHandoff.newm
  2253  		newmHandoff.newm.set(mp)
  2254  		if newmHandoff.waiting {
  2255  			newmHandoff.waiting = false
  2256  			notewakeup(&newmHandoff.wake)
  2257  		}
  2258  		unlock(&newmHandoff.lock)
  2259  		// The M has not started yet, but the template thread does not
  2260  		// participate in STW, so it will always process queued Ms and
  2261  		// it is safe to releasem.
  2262  		releasem(getg().m)
  2263  		return
  2264  	}
  2265  	newm1(mp)
  2266  	releasem(getg().m)
  2267  }
  2268  
  2269  func newm1(mp *m) {
  2270  	if iscgo {
  2271  		var ts cgothreadstart
  2272  		if _cgo_thread_start == nil {
  2273  			throw("_cgo_thread_start missing")
  2274  		}
  2275  		ts.g.set(mp.g0)
  2276  		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
  2277  		ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
  2278  		if msanenabled {
  2279  			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
  2280  		}
  2281  		if asanenabled {
  2282  			asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
  2283  		}
  2284  		execLock.rlock() // Prevent process clone.
  2285  		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
  2286  		execLock.runlock()
  2287  		return
  2288  	}
  2289  	execLock.rlock() // Prevent process clone.
  2290  	newosproc(mp)
  2291  	execLock.runlock()
  2292  }
  2293  
  2294  // startTemplateThread starts the template thread if it is not already
  2295  // running.
  2296  //
  2297  // The calling thread must itself be in a known-good state.
  2298  func startTemplateThread() {
  2299  	if GOARCH == "wasm" { // no threads on wasm yet
  2300  		return
  2301  	}
  2302  
  2303  	// Disable preemption to guarantee that the template thread will be
  2304  	// created before a park once haveTemplateThread is set.
  2305  	mp := acquirem()
  2306  	if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
  2307  		releasem(mp)
  2308  		return
  2309  	}
  2310  	newm(templateThread, nil, -1)
  2311  	releasem(mp)
  2312  }
  2313  
  2314  // templateThread is a thread in a known-good state that exists solely
  2315  // to start new threads in known-good states when the calling thread
  2316  // may not be in a good state.
  2317  //
  2318  // Many programs never need this, so templateThread is started lazily
  2319  // when we first enter a state that might lead to running on a thread
  2320  // in an unknown state.
  2321  //
  2322  // templateThread runs on an M without a P, so it must not have write
  2323  // barriers.
  2324  //
  2325  //go:nowritebarrierrec
  2326  func templateThread() {
  2327  	lock(&sched.lock)
  2328  	sched.nmsys++
  2329  	checkdead()
  2330  	unlock(&sched.lock)
  2331  
  2332  	for {
  2333  		lock(&newmHandoff.lock)
  2334  		for newmHandoff.newm != 0 {
  2335  			newm := newmHandoff.newm.ptr()
  2336  			newmHandoff.newm = 0
  2337  			unlock(&newmHandoff.lock)
  2338  			for newm != nil {
  2339  				next := newm.schedlink.ptr()
  2340  				newm.schedlink = 0
  2341  				newm1(newm)
  2342  				newm = next
  2343  			}
  2344  			lock(&newmHandoff.lock)
  2345  		}
  2346  		newmHandoff.waiting = true
  2347  		noteclear(&newmHandoff.wake)
  2348  		unlock(&newmHandoff.lock)
  2349  		notesleep(&newmHandoff.wake)
  2350  	}
  2351  }
  2352  
  2353  // Stops execution of the current m until new work is available.
  2354  // Returns with acquired P.
  2355  func stopm() {
  2356  	gp := getg()
  2357  
  2358  	if gp.m.locks != 0 {
  2359  		throw("stopm holding locks")
  2360  	}
  2361  	if gp.m.p != 0 {
  2362  		throw("stopm holding p")
  2363  	}
  2364  	if gp.m.spinning {
  2365  		throw("stopm spinning")
  2366  	}
  2367  
  2368  	lock(&sched.lock)
  2369  	mput(gp.m)
  2370  	unlock(&sched.lock)
  2371  	mPark()
  2372  	acquirep(gp.m.nextp.ptr())
  2373  	gp.m.nextp = 0
  2374  }
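
        // The wakeup that ends mPark here comes from startm: it stores the P to
        // run in m.nextp before calling notewakeup(&mp.park), and the parked M
        // picks that P up via acquirep(gp.m.nextp.ptr()) once it resumes.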
  2375  
  2376  func mspinning() {
  2377  	// startm's caller incremented nmspinning. Set the new M's spinning.
  2378  	getg().m.spinning = true
  2379  }
  2380  
  2381  // Schedules some M to run the p (creates an M if necessary).
  2382  // If pp==nil, tries to get an idle P; if there are no idle P's, it does nothing.
  2383  // May run with m.p==nil, so write barriers are not allowed.
  2384  // If spinning is set, the caller has incremented nmspinning and must provide a
  2385  // P. startm will set m.spinning in the newly started M.
  2386  //
  2387  // Callers passing a non-nil P must call from a non-preemptible context. See
  2388  // comment on acquirem below.
  2389  //
  2390  // Argument lockheld indicates whether the caller already acquired the
  2391  // scheduler lock. Callers holding the lock when making the call must pass
  2392  // true. The lock might be temporarily dropped, but will be reacquired before
  2393  // returning.
  2394  //
  2395  // Must not have write barriers because this may be called without a P.
  2396  //
  2397  //go:nowritebarrierrec
  2398  func startm(pp *p, spinning, lockheld bool) {
  2399  	// Disable preemption.
  2400  	//
  2401  	// Every owned P must have an owner that will eventually stop it in the
  2402  	// event of a GC stop request. startm takes transient ownership of a P
  2403  	// (either from argument or pidleget below) and transfers ownership to
  2404  	// a started M, which will be responsible for performing the stop.
  2405  	//
  2406  	// Preemption must be disabled during this transient ownership,
  2407  	// otherwise the P this is running on may enter GC stop while still
  2408  	// holding the transient P, leaving that P in limbo and deadlocking the
  2409  	// STW.
  2410  	//
  2411  	// Callers passing a non-nil P must already be in non-preemptible
  2412  	// context, otherwise such preemption could occur on function entry to
  2413  	// startm. Callers passing a nil P may be preemptible, so we must
  2414  	// disable preemption before acquiring a P from pidleget below.
  2415  	mp := acquirem()
  2416  	if !lockheld {
  2417  		lock(&sched.lock)
  2418  	}
  2419  	if pp == nil {
  2420  		if spinning {
  2421  			// TODO(prattmic): All remaining calls to this function
  2422  			// with pp == nil could be cleaned up to find a P
  2423  			// before calling startm.
  2424  			throw("startm: P required for spinning=true")
  2425  		}
  2426  		pp, _ = pidleget(0)
  2427  		if pp == nil {
  2428  			if !lockheld {
  2429  				unlock(&sched.lock)
  2430  			}
  2431  			releasem(mp)
  2432  			return
  2433  		}
  2434  	}
  2435  	nmp := mget()
  2436  	if nmp == nil {
  2437  		// No M is available, we must drop sched.lock and call newm.
  2438  		// However, we already own a P to assign to the M.
  2439  		//
  2440  		// Once sched.lock is released, another G (e.g., in a syscall),
  2441  		// could find no idle P while checkdead finds a runnable G but
  2442  		// no running M's because this new M hasn't started yet, thus
  2443  		// throwing in an apparent deadlock.
  2444  		// This apparent deadlock is possible when startm is called
  2445  		// from sysmon, which doesn't count as a running M.
  2446  		//
  2447  		// Avoid this situation by pre-allocating the ID for the new M,
  2448  		// thus marking it as 'running' before we drop sched.lock. This
  2449  		// new M will eventually run the scheduler to execute any
  2450  		// queued G's.
  2451  		id := mReserveID()
  2452  		unlock(&sched.lock)
  2453  
  2454  		var fn func()
  2455  		if spinning {
  2456  			// The caller incremented nmspinning, so set m.spinning in the new M.
  2457  			fn = mspinning
  2458  		}
  2459  		newm(fn, pp, id)
  2460  
  2461  		if lockheld {
  2462  			lock(&sched.lock)
  2463  		}
  2464  		// Ownership transfer of pp committed by start in newm.
  2465  		// Preemption is now safe.
  2466  		releasem(mp)
  2467  		return
  2468  	}
  2469  	if !lockheld {
  2470  		unlock(&sched.lock)
  2471  	}
  2472  	if nmp.spinning {
  2473  		throw("startm: m is spinning")
  2474  	}
  2475  	if nmp.nextp != 0 {
  2476  		throw("startm: m has p")
  2477  	}
  2478  	if spinning && !runqempty(pp) {
  2479  		throw("startm: p has runnable gs")
  2480  	}
  2481  	// The caller incremented nmspinning, so set m.spinning in the new M.
  2482  	nmp.spinning = spinning
  2483  	nmp.nextp.set(pp)
  2484  	notewakeup(&nmp.park)
  2485  	// Ownership transfer of pp committed by wakeup. Preemption is now
  2486  	// safe.
  2487  	releasem(mp)
  2488  }
  2489  
  2490  // Hands off P from syscall or locked M.
  2491  // Always runs without a P, so write barriers are not allowed.
  2492  //
  2493  //go:nowritebarrierrec
  2494  func handoffp(pp *p) {
  2495  	// handoffp must start an M in any situation where
  2496  	// findrunnable would return a G to run on pp.
  2497  
  2498  	// if it has local work, start it straight away
  2499  	if !runqempty(pp) || sched.runqsize != 0 {
  2500  		startm(pp, false, false)
  2501  		return
  2502  	}
  2503  	// if there's trace work to do, start it straight away
  2504  	if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
  2505  		startm(pp, false, false)
  2506  		return
  2507  	}
  2508  	// if it has GC work, start it straight away
  2509  	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
  2510  		startm(pp, false, false)
  2511  		return
  2512  	}
  2513  	// no local work, check that there are no spinning/idle M's,
  2514  	// otherwise our help is not required
  2515  	if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) { // TODO: fast atomic
  2516  		sched.needspinning.Store(0)
  2517  		startm(pp, true, false)
  2518  		return
  2519  	}
  2520  	lock(&sched.lock)
  2521  	if sched.gcwaiting.Load() {
  2522  		pp.status = _Pgcstop
  2523  		sched.stopwait--
  2524  		if sched.stopwait == 0 {
  2525  			notewakeup(&sched.stopnote)
  2526  		}
  2527  		unlock(&sched.lock)
  2528  		return
  2529  	}
  2530  	if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
  2531  		sched.safePointFn(pp)
  2532  		sched.safePointWait--
  2533  		if sched.safePointWait == 0 {
  2534  			notewakeup(&sched.safePointNote)
  2535  		}
  2536  	}
  2537  	if sched.runqsize != 0 {
  2538  		unlock(&sched.lock)
  2539  		startm(pp, false, false)
  2540  		return
  2541  	}
  2542  	// If this is the last running P and nobody is polling the network,
  2543  	// we need to wake up another M to poll the network.
  2544  	if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
  2545  		unlock(&sched.lock)
  2546  		startm(pp, false, false)
  2547  		return
  2548  	}
  2549  
  2550  	// The scheduler lock cannot be held when calling wakeNetPoller below
  2551  	// because wakeNetPoller may call wakep which may call startm.
  2552  	when := nobarrierWakeTime(pp)
  2553  	pidleput(pp, 0)
  2554  	unlock(&sched.lock)
  2555  
  2556  	if when != 0 {
  2557  		wakeNetPoller(when)
  2558  	}
  2559  }
  2560  
  2561  // Tries to add one more P to execute G's.
  2562  // Called when a G is made runnable (newproc, ready).
  2563  // Must be called with a P.
  2564  func wakep() {
  2565  	// Be conservative about spinning threads, only start one if none exist
  2566  	// already.
  2567  	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
  2568  		return
  2569  	}
  2570  
  2571  	// Disable preemption until ownership of pp transfers to the next M in
  2572  	// startm. Otherwise preemption here would leave pp stuck waiting to
  2573  	// enter _Pgcstop.
  2574  	//
  2575  	// See preemption comment on acquirem in startm for more details.
  2576  	mp := acquirem()
  2577  
  2578  	var pp *p
  2579  	lock(&sched.lock)
  2580  	pp, _ = pidlegetSpinning(0)
  2581  	if pp == nil {
  2582  		if sched.nmspinning.Add(-1) < 0 {
  2583  			throw("wakep: negative nmspinning")
  2584  		}
  2585  		unlock(&sched.lock)
  2586  		releasem(mp)
  2587  		return
  2588  	}
  2589  	// Since we always have a P, the race in the "No M is available"
  2590  	// comment in startm doesn't apply during the small window between the
  2591  	// unlock here and lock in startm. A checkdead in between will always
  2592  	// see at least one running M (ours).
  2593  	unlock(&sched.lock)
  2594  
  2595  	startm(pp, true, false)
  2596  
  2597  	releasem(mp)
  2598  }
  2599  
  2600  // Stops execution of the current m that is locked to a g until the g is runnable again.
  2601  // Returns with acquired P.
  2602  func stoplockedm() {
  2603  	gp := getg()
  2604  
  2605  	if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
  2606  		throw("stoplockedm: inconsistent locking")
  2607  	}
  2608  	if gp.m.p != 0 {
  2609  		// Schedule another M to run this p.
  2610  		pp := releasep()
  2611  		handoffp(pp)
  2612  	}
  2613  	incidlelocked(1)
  2614  	// Wait until another thread schedules lockedg again.
  2615  	mPark()
  2616  	status := readgstatus(gp.m.lockedg.ptr())
  2617  	if status&^_Gscan != _Grunnable {
  2618  		print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
  2619  		dumpgstatus(gp.m.lockedg.ptr())
  2620  		throw("stoplockedm: not runnable")
  2621  	}
  2622  	acquirep(gp.m.nextp.ptr())
  2623  	gp.m.nextp = 0
  2624  }
  2625  
  2626  // Schedules the locked m to run the locked gp.
  2627  // May run during STW, so write barriers are not allowed.
  2628  //
  2629  //go:nowritebarrierrec
  2630  func startlockedm(gp *g) {
  2631  	mp := gp.lockedm.ptr()
  2632  	if mp == getg().m {
  2633  		throw("startlockedm: locked to me")
  2634  	}
  2635  	if mp.nextp != 0 {
  2636  		throw("startlockedm: m has p")
  2637  	}
  2638  	// directly hand off the current P to the locked m
  2639  	incidlelocked(-1)
  2640  	pp := releasep()
  2641  	mp.nextp.set(pp)
  2642  	notewakeup(&mp.park)
  2643  	stopm()
  2644  }
  2645  
  2646  // Stops the current m for stopTheWorld.
  2647  // Returns when the world is restarted.
  2648  func gcstopm() {
  2649  	gp := getg()
  2650  
  2651  	if !sched.gcwaiting.Load() {
  2652  		throw("gcstopm: not waiting for gc")
  2653  	}
  2654  	if gp.m.spinning {
  2655  		gp.m.spinning = false
  2656  		// OK to just drop nmspinning here,
  2657  		// startTheWorld will unpark threads as necessary.
  2658  		if sched.nmspinning.Add(-1) < 0 {
  2659  			throw("gcstopm: negative nmspinning")
  2660  		}
  2661  	}
  2662  	pp := releasep()
  2663  	lock(&sched.lock)
  2664  	pp.status = _Pgcstop
  2665  	sched.stopwait--
  2666  	if sched.stopwait == 0 {
  2667  		notewakeup(&sched.stopnote)
  2668  	}
  2669  	unlock(&sched.lock)
  2670  	stopm()
  2671  }
  2672  
  2673  // Schedules gp to run on the current M.
  2674  // If inheritTime is true, gp inherits the remaining time in the
  2675  // current time slice. Otherwise, it starts a new time slice.
  2676  // Never returns.
  2677  //
  2678  // Write barriers are allowed because this is called immediately after
  2679  // acquiring a P in several places.
  2680  //
  2681  //go:yeswritebarrierrec
  2682  func execute(gp *g, inheritTime bool) {
  2683  	mp := getg().m
  2684  
  2685  	if goroutineProfile.active {
  2686  		// Make sure that gp has had its stack written out to the goroutine
  2687  		// profile, exactly as it was when the goroutine profiler first stopped
  2688  		// the world.
  2689  		tryRecordGoroutineProfile(gp, osyield)
  2690  	}
  2691  
  2692  	// Assign gp.m before entering _Grunning so running Gs have an
  2693  	// M.
  2694  	mp.curg = gp
  2695  	gp.m = mp
  2696  	casgstatus(gp, _Grunnable, _Grunning)
  2697  	gp.waitsince = 0
  2698  	gp.preempt = false
  2699  	gp.stackguard0 = gp.stack.lo + stackGuard
  2700  	if !inheritTime {
  2701  		mp.p.ptr().schedtick++
  2702  	}
  2703  
  2704  	// Check whether the profiler needs to be turned on or off.
  2705  	hz := sched.profilehz
  2706  	if mp.profilehz != hz {
  2707  		setThreadCPUProfiler(hz)
  2708  	}
  2709  
  2710  	if traceEnabled() {
  2711  		// GoSysExit has to happen when we have a P, but before GoStart.
  2712  		// So we emit it here.
  2713  		if gp.syscallsp != 0 && gp.sysblocktraced {
  2714  			traceGoSysExit(gp.sysexitticks)
  2715  		}
  2716  		traceGoStart()
  2717  	}
  2718  
  2719  	gogo(&gp.sched)
  2720  }
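
        // execute is the tail of the scheduling loop: callers such as schedule
        // acquire a P, pick a runnable G, and jump into it. Roughly (a sketch, not
        // the exact schedule code):
        //
        //	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is found
        //	// ...
        //	execute(gp, inheritTime) // never returns; gogo jumps to gp.sched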
  2721  
  2722  // Finds a runnable goroutine to execute.
  2723  // Tries to steal from other P's, get g from local or global queue, poll network.
  2724  // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
  2725  // reader) so the caller should try to wake a P.
  2726  func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
  2727  	mp := getg().m
  2728  
  2729  	// The conditions here and in handoffp must agree: if
  2730  	// findrunnable would return a G to run, handoffp must start
  2731  	// an M.
  2732  
  2733  top:
  2734  	pp := mp.p.ptr()
  2735  	if sched.gcwaiting.Load() {
  2736  		gcstopm()
  2737  		goto top
  2738  	}
  2739  	if pp.runSafePointFn != 0 {
  2740  		runSafePointFn()
  2741  	}
  2742  
  2743  	// now and pollUntil are saved for work stealing later,
  2744  	// which may steal timers. It's important that between now
  2745  	// and then, nothing blocks, so these numbers remain mostly
  2746  	// relevant.
  2747  	now, pollUntil, _ := checkTimers(pp, 0)
  2748  
  2749  	// Try to schedule the trace reader.
  2750  	if traceEnabled() || traceShuttingDown() {
  2751  		gp := traceReader()
  2752  		if gp != nil {
  2753  			casgstatus(gp, _Gwaiting, _Grunnable)
  2754  			traceGoUnpark(gp, 0)
  2755  			return gp, false, true
  2756  		}
  2757  	}
  2758  
  2759  	// Try to schedule a GC worker.
  2760  	if gcBlackenEnabled != 0 {
  2761  		gp, tnow := gcController.findRunnableGCWorker(pp, now)
  2762  		if gp != nil {
  2763  			return gp, false, true
  2764  		}
  2765  		now = tnow
  2766  	}
  2767  
  2768  	// Check the global runnable queue once in a while to ensure fairness.
  2769  	// Otherwise two goroutines can completely occupy the local runqueue
  2770  	// by constantly respawning each other.
  2771  	if pp.schedtick%61 == 0 && sched.runqsize > 0 {
  2772  		lock(&sched.lock)
  2773  		gp := globrunqget(pp, 1)
  2774  		unlock(&sched.lock)
  2775  		if gp != nil {
  2776  			return gp, false, false
  2777  		}
  2778  	}
  2779  
  2780  	// Wake up the finalizer G.
  2781  	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
  2782  		if gp := wakefing(); gp != nil {
  2783  			ready(gp, 0, true)
  2784  		}
  2785  	}
  2786  	if *cgo_yield != nil {
  2787  		asmcgocall(*cgo_yield, nil)
  2788  	}
  2789  
  2790  	// local runq
  2791  	if gp, inheritTime := runqget(pp); gp != nil {
  2792  		return gp, inheritTime, false
  2793  	}
  2794  
  2795  	// global runq
  2796  	if sched.runqsize != 0 {
  2797  		lock(&sched.lock)
  2798  		gp := globrunqget(pp, 0)
  2799  		unlock(&sched.lock)
  2800  		if gp != nil {
  2801  			return gp, false, false
  2802  		}
  2803  	}
  2804  
  2805  	// Poll network.
  2806  	// This netpoll is only an optimization before we resort to stealing.
  2807  	// We can safely skip it if there are no waiters or a thread is blocked
  2808  	// in netpoll already. If there is any kind of logical race with that
  2809  	// blocked thread (e.g. it has already returned from netpoll, but does
  2810  	// not set lastpoll yet), this thread will do blocking netpoll below
  2811  	// anyway.
  2812  	if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
  2813  		if list := netpoll(0); !list.empty() { // non-blocking
  2814  			gp := list.pop()
  2815  			injectglist(&list)
  2816  			casgstatus(gp, _Gwaiting, _Grunnable)
  2817  			if traceEnabled() {
  2818  				traceGoUnpark(gp, 0)
  2819  			}
  2820  			return gp, false, false
  2821  		}
  2822  	}
  2823  
  2824  	// Spinning Ms: steal work from other Ps.
  2825  	//
  2826  	// Limit the number of spinning Ms to half the number of busy Ps.
  2827  	// This is necessary to prevent excessive CPU consumption when
  2828  	// GOMAXPROCS>>1 but the program parallelism is low.
  2829  	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
  2830  		if !mp.spinning {
  2831  			mp.becomeSpinning()
  2832  		}
  2833  
  2834  		gp, inheritTime, tnow, w, newWork := stealWork(now)
  2835  		if gp != nil {
  2836  			// Successfully stole.
  2837  			return gp, inheritTime, false
  2838  		}
  2839  		if newWork {
  2840  			// There may be new timer or GC work; restart to
  2841  			// discover.
  2842  			goto top
  2843  		}
  2844  
  2845  		now = tnow
  2846  		if w != 0 && (pollUntil == 0 || w < pollUntil) {
  2847  			// Earlier timer to wait for.
  2848  			pollUntil = w
  2849  		}
  2850  	}
  2851  
  2852  	// We have nothing to do.
  2853  	//
  2854  	// If we're in the GC mark phase, can safely scan and blacken objects,
  2855  	// and have work to do, run idle-time marking rather than give up the P.
  2856  	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
  2857  		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
  2858  		if node != nil {
  2859  			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
  2860  			gp := node.gp.ptr()
  2861  			casgstatus(gp, _Gwaiting, _Grunnable)
  2862  			if traceEnabled() {
  2863  				traceGoUnpark(gp, 0)
  2864  			}
  2865  			return gp, false, false
  2866  		}
  2867  		gcController.removeIdleMarkWorker()
  2868  	}
  2869  
  2870  	// wasm only:
  2871  	// If a callback returned and no other goroutine is awake,
  2872  	// then wake the event handler goroutine, which pauses execution
  2873  	// until a callback is triggered.
  2874  	gp, otherReady := beforeIdle(now, pollUntil)
  2875  	if gp != nil {
  2876  		casgstatus(gp, _Gwaiting, _Grunnable)
  2877  		if traceEnabled() {
  2878  			traceGoUnpark(gp, 0)
  2879  		}
  2880  		return gp, false, false
  2881  	}
  2882  	if otherReady {
  2883  		goto top
  2884  	}
  2885  
  2886  	// Before we drop our P, make a snapshot of the allp slice,
  2887  	// which can change underfoot once we no longer block
  2888  	// safe-points. We don't need to snapshot the contents because
  2889  	// everything up to cap(allp) is immutable.
  2890  	allpSnapshot := allp
  2891  	// Also snapshot masks. Value changes are OK, but we can't allow
  2892  	// len to change out from under us.
  2893  	idlepMaskSnapshot := idlepMask
  2894  	timerpMaskSnapshot := timerpMask
  2895  
  2896  	// return P and block
  2897  	lock(&sched.lock)
  2898  	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
  2899  		unlock(&sched.lock)
  2900  		goto top
  2901  	}
  2902  	if sched.runqsize != 0 {
  2903  		gp := globrunqget(pp, 0)
  2904  		unlock(&sched.lock)
  2905  		return gp, false, false
  2906  	}
  2907  	if !mp.spinning && sched.needspinning.Load() == 1 {
  2908  		// See "Delicate dance" comment below.
  2909  		mp.becomeSpinning()
  2910  		unlock(&sched.lock)
  2911  		goto top
  2912  	}
  2913  	if releasep() != pp {
  2914  		throw("findrunnable: wrong p")
  2915  	}
  2916  	now = pidleput(pp, now)
  2917  	unlock(&sched.lock)
  2918  
  2919  	// Delicate dance: thread transitions from spinning to non-spinning
  2920  	// state, potentially concurrently with submission of new work. We must
  2921  	// drop nmspinning first and then check all sources again (with
  2922  	// #StoreLoad memory barrier in between). If we do it the other way
  2923  	// around, another thread can submit work after we've checked all
  2924  	// sources but before we drop nmspinning; as a result nobody will
  2925  	// unpark a thread to run the work.
  2926  	//
  2927  	// This applies to the following sources of work:
  2928  	//
  2929  	// * Goroutines added to a per-P run queue.
  2930  	// * New/modified-earlier timers on a per-P timer heap.
  2931  	// * Idle-priority GC work (barring golang.org/issue/19112).
  2932  	//
  2933  	// If we discover new work below, we need to restore m.spinning as a
  2934  	// signal for resetspinning to unpark a new worker thread (because
  2935  	// there can be more than one starving goroutine).
  2936  	//
  2937  	// However, if after discovering new work we also observe no idle Ps
  2938  	// (either here or in resetspinning), we have a problem. We may be
  2939  	// racing with a non-spinning M in the block above, having found no
  2940  	// work and preparing to release its P and park. Allowing that P to go
  2941  	// idle will result in loss of work conservation (idle P while there is
  2942  	// runnable work). This could result in complete deadlock in the
  2943  	// unlikely event that we discover new work (from netpoll) right as we
  2944  	// are racing with _all_ other Ps going idle.
  2945  	//
  2946  	// We use sched.needspinning to synchronize with non-spinning Ms going
  2947  	// idle. If needspinning is set when they are about to drop their P,
  2948  	// they abort the drop and instead become a new spinning M on our
  2949  	// behalf. If we are not racing and the system is truly fully loaded
  2950  	// then no spinning threads are required, and the next thread to
  2951  	// naturally become spinning will clear the flag.
  2952  	//
  2953  	// Also see "Worker thread parking/unparking" comment at the top of the
  2954  	// file.
  2955  	wasSpinning := mp.spinning
  2956  	if mp.spinning {
  2957  		mp.spinning = false
  2958  		if sched.nmspinning.Add(-1) < 0 {
  2959  			throw("findrunnable: negative nmspinning")
  2960  		}
  2961  
  2962  		// Note that for correctness, only the last M transitioning from
  2963  		// spinning to non-spinning must perform these rechecks to
  2964  		// ensure no missed work. However, the runtime has some cases
  2965  		// of transient increments of nmspinning that are decremented
  2966  		// without going through this path, so we must be conservative
  2967  		// and perform the check on all spinning Ms.
  2968  		//
  2969  		// See https://go.dev/issue/43997.
  2970  
  2971  		// Check all runqueues once again.
  2972  		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
  2973  		if pp != nil {
  2974  			acquirep(pp)
  2975  			mp.becomeSpinning()
  2976  			goto top
  2977  		}
  2978  
  2979  		// Check for idle-priority GC work again.
  2980  		pp, gp := checkIdleGCNoP()
  2981  		if pp != nil {
  2982  			acquirep(pp)
  2983  			mp.becomeSpinning()
  2984  
  2985  			// Run the idle worker.
  2986  			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
  2987  			casgstatus(gp, _Gwaiting, _Grunnable)
  2988  			if traceEnabled() {
  2989  				traceGoUnpark(gp, 0)
  2990  			}
  2991  			return gp, false, false
  2992  		}
  2993  
  2994  		// Finally, check for timer creation or expiry concurrently with
  2995  		// transitioning from spinning to non-spinning.
  2996  		//
  2997  		// Note that we cannot use checkTimers here because it calls
  2998  		// adjusttimers which may need to allocate memory, and that isn't
  2999  		// allowed when we don't have an active P.
  3000  		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
  3001  	}
  3002  
  3003  	// Poll network until next timer.
  3004  	if netpollinited() && (netpollWaiters.Load() > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
  3005  		sched.pollUntil.Store(pollUntil)
  3006  		if mp.p != 0 {
  3007  			throw("findrunnable: netpoll with p")
  3008  		}
  3009  		if mp.spinning {
  3010  			throw("findrunnable: netpoll with spinning")
  3011  		}
  3012  		// Refresh now.
  3013  		now = nanotime()
  3014  		delay := int64(-1)
  3015  		if pollUntil != 0 {
  3016  			delay = pollUntil - now
  3017  			if delay < 0 {
  3018  				delay = 0
  3019  			}
  3020  		}
  3021  		if faketime != 0 {
  3022  			// When using fake time, just poll.
  3023  			delay = 0
  3024  		}
  3025  		list := netpoll(delay) // block until new work is available
  3026  		sched.pollUntil.Store(0)
  3027  		sched.lastpoll.Store(now)
  3028  		if faketime != 0 && list.empty() {
  3029  			// Using fake time and nothing is ready; stop M.
  3030  			// When all M's stop, checkdead will call timejump.
  3031  			stopm()
  3032  			goto top
  3033  		}
  3034  		lock(&sched.lock)
  3035  		pp, _ := pidleget(now)
  3036  		unlock(&sched.lock)
  3037  		if pp == nil {
  3038  			injectglist(&list)
  3039  		} else {
  3040  			acquirep(pp)
  3041  			if !list.empty() {
  3042  				gp := list.pop()
  3043  				injectglist(&list)
  3044  				casgstatus(gp, _Gwaiting, _Grunnable)
  3045  				if traceEnabled() {
  3046  					traceGoUnpark(gp, 0)
  3047  				}
  3048  				return gp, false, false
  3049  			}
  3050  			if wasSpinning {
  3051  				mp.becomeSpinning()
  3052  			}
  3053  			goto top
  3054  		}
  3055  	} else if pollUntil != 0 && netpollinited() {
  3056  		pollerPollUntil := sched.pollUntil.Load()
  3057  		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
  3058  			netpollBreak()
  3059  		}
  3060  	}
  3061  	stopm()
  3062  	goto top
  3063  }
  3064  
  3065  // pollWork reports whether there is non-background work this P could
  3066  // be doing. This is a fairly lightweight check to be used for
  3067  // background work loops, like idle GC. It checks a subset of the
  3068  // conditions checked by the actual scheduler.
  3069  func pollWork() bool {
  3070  	if sched.runqsize != 0 {
  3071  		return true
  3072  	}
  3073  	p := getg().m.p.ptr()
  3074  	if !runqempty(p) {
  3075  		return true
  3076  	}
  3077  	if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
  3078  		if list := netpoll(0); !list.empty() {
  3079  			injectglist(&list)
  3080  			return true
  3081  		}
  3082  	}
  3083  	return false
  3084  }
  3085  
  3086  // stealWork attempts to steal a runnable goroutine or timer from any P.
  3087  //
  3088  // If newWork is true, new work may have been readied.
  3089  //
  3090  // If now is not 0 it is the current time. stealWork returns the passed time or
  3091  // the current time if now was passed as 0.
  3092  func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
  3093  	pp := getg().m.p.ptr()
  3094  
  3095  	ranTimer := false
  3096  
  3097  	const stealTries = 4
  3098  	for i := 0; i < stealTries; i++ {
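        		// Only on the final pass do we also steal timers and the
        		// victim's runnext slot; earlier passes take work only from
        		// the body of the victim's run queue.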
  3099  		stealTimersOrRunNextG := i == stealTries-1
  3100  
  3101  		for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
  3102  			if sched.gcwaiting.Load() {
  3103  				// GC work may be available.
  3104  				return nil, false, now, pollUntil, true
  3105  			}
  3106  			p2 := allp[enum.position()]
  3107  			if pp == p2 {
  3108  				continue
  3109  			}
  3110  
  3111  			// Steal timers from p2. This call to checkTimers is the only place
  3112  			// where we might hold a lock on a different P's timers. We do this
  3113  			// once on the last pass before checking runnext because stealing
  3114  			// from the other P's runnext should be the last resort, so if there
  3115  			// are timers to steal do that first.
  3116  			//
  3117  			// We only check timers on one of the stealing iterations because
  3118  			// the time stored in now doesn't change in this loop and checking
  3119  			// the timers for each P more than once with the same value of now
  3120  			// is probably a waste of time.
  3121  			//
  3122  			// timerpMask tells us whether the P may have timers at all. If it
  3123  			// can't, no need to check at all.
  3124  			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
  3125  				tnow, w, ran := checkTimers(p2, now)
  3126  				now = tnow
  3127  				if w != 0 && (pollUntil == 0 || w < pollUntil) {
  3128  					pollUntil = w
  3129  				}
  3130  				if ran {
  3131  					// Running the timers may have
  3132  					// made an arbitrary number of G's
  3133  					// ready and added them to this P's
  3134  					// local run queue. That invalidates
  3135  					// the assumption of runqsteal
  3136  					// that it always has room to add
  3137  					// stolen G's. So check now if there
  3138  					// is a local G to run.
  3139  					if gp, inheritTime := runqget(pp); gp != nil {
  3140  						return gp, inheritTime, now, pollUntil, ranTimer
  3141  					}
  3142  					ranTimer = true
  3143  				}
  3144  			}
  3145  
  3146  			// Don't bother to attempt to steal if p2 is idle.
  3147  			if !idlepMask.read(enum.position()) {
  3148  				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
  3149  					return gp, false, now, pollUntil, ranTimer
  3150  				}
  3151  			}
  3152  		}
  3153  	}
  3154  
  3155  	// No goroutines found to steal. Regardless, running a timer may have
  3156  	// made some goroutine ready that we missed. Indicate the next timer to
  3157  	// wait for.
  3158  	return nil, false, now, pollUntil, ranTimer
  3159  }
  3160  
  3161  // Check all Ps for a runnable G to steal.
  3162  //
  3163  // On entry we have no P. If a G is available to steal and a P is available,
  3164  // the P is returned; the caller should acquire it and then steal the
  3165  // work to it.
  3166  func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
  3167  	for id, p2 := range allpSnapshot {
  3168  		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
  3169  			lock(&sched.lock)
  3170  			pp, _ := pidlegetSpinning(0)
  3171  			if pp == nil {
  3172  				// Can't get a P, don't bother checking remaining Ps.
  3173  				unlock(&sched.lock)
  3174  				return nil
  3175  			}
  3176  			unlock(&sched.lock)
  3177  			return pp
  3178  		}
  3179  	}
  3180  
  3181  	// No work available.
  3182  	return nil
  3183  }
  3184  
  3185  // Check all Ps for a timer expiring sooner than pollUntil.
  3186  //
  3187  // Returns updated pollUntil value.
  3188  func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
  3189  	for id, p2 := range allpSnapshot {
  3190  		if timerpMaskSnapshot.read(uint32(id)) {
  3191  			w := nobarrierWakeTime(p2)
  3192  			if w != 0 && (pollUntil == 0 || w < pollUntil) {
  3193  				pollUntil = w
  3194  			}
  3195  		}
  3196  	}
  3197  
  3198  	return pollUntil
  3199  }
  3200  
  3201  // Check for idle-priority GC, without a P on entry.
  3202  //
  3203  // If some GC work, a P, and a worker G are all available, the P and G will be
  3204  // returned. The returned P has not been wired yet.
  3205  func checkIdleGCNoP() (*p, *g) {
  3206  	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we
  3207  	// must check again after acquiring a P. As an optimization, we also check
  3208  	// if an idle mark worker is needed at all. This is OK here, because if we
  3209  	// observe that one isn't needed, at least one is currently running. Even if
  3210  	// it stops running, its own journey into the scheduler should schedule it
  3211  	// again, if need be (at which point, this check will pass, if relevant).
  3212  	if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
  3213  		return nil, nil
  3214  	}
  3215  	if !gcMarkWorkAvailable(nil) {
  3216  		return nil, nil
  3217  	}
  3218  
  3219  	// Work is available; we can start an idle GC worker only if there is
  3220  	// an available P and available worker G.
  3221  	//
  3222  	// We can attempt to acquire these in either order, though both have
  3223  	// synchronization concerns (see below). Workers are almost always
  3224  	// available (see comment in findRunnableGCWorker for the one case
  3225  	// there may be none). Since we're slightly less likely to find a P,
  3226  	// check for that first.
  3227  	//
  3228  	// Synchronization: note that we must hold sched.lock until we are
  3229  	// committed to keeping it. Otherwise we cannot put the unnecessary P
  3230  	// back in sched.pidle without performing the full set of idle
  3231  	// transition checks.
  3232  	//
  3233  	// If we were to check gcBgMarkWorkerPool first, we must somehow handle
  3234  	// the assumption in gcControllerState.findRunnableGCWorker that an
  3235  	// empty gcBgMarkWorkerPool is only possible if gcMarkDone is running.
  3236  	lock(&sched.lock)
  3237  	pp, now := pidlegetSpinning(0)
  3238  	if pp == nil {
  3239  		unlock(&sched.lock)
  3240  		return nil, nil
  3241  	}
  3242  
  3243  	// Now that we own a P, gcBlackenEnabled can't change (as it requires STW).
  3244  	if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
  3245  		pidleput(pp, now)
  3246  		unlock(&sched.lock)
  3247  		return nil, nil
  3248  	}
  3249  
  3250  	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
  3251  	if node == nil {
  3252  		pidleput(pp, now)
  3253  		unlock(&sched.lock)
  3254  		gcController.removeIdleMarkWorker()
  3255  		return nil, nil
  3256  	}
  3257  
  3258  	unlock(&sched.lock)
  3259  
  3260  	return pp, node.gp.ptr()
  3261  }
  3262  
  3263  // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
  3264  // going to wake up before the when argument; or it wakes an idle P to service
  3265  // timers and the network poller if there isn't one already.
  3266  func wakeNetPoller(when int64) {
  3267  	if sched.lastpoll.Load() == 0 {
  3268  		// In findrunnable we ensure that when polling the pollUntil
  3269  		// field is either zero or the time to which the current
  3270  		// poll is expected to run. This can have a spurious wakeup
  3271  		// but should never miss a wakeup.
  3272  		pollerPollUntil := sched.pollUntil.Load()
  3273  		if pollerPollUntil == 0 || pollerPollUntil > when {
  3274  			netpollBreak()
  3275  		}
  3276  	} else {
  3277  		// There are no threads in the network poller, try to get
  3278  		// one there so it can handle new timers.
  3279  		if GOOS != "plan9" { // Temporary workaround - see issue #42303.
  3280  			wakep()
  3281  		}
  3282  	}
  3283  }
  3284  
  3285  func resetspinning() {
  3286  	gp := getg()
  3287  	if !gp.m.spinning {
  3288  		throw("resetspinning: not a spinning m")
  3289  	}
  3290  	gp.m.spinning = false
  3291  	nmspinning := sched.nmspinning.Add(-1)
  3292  	if nmspinning < 0 {
  3293  		throw("findrunnable: negative nmspinning")
  3294  	}
  3295  	// M wakeup policy is deliberately somewhat conservative, so check if we
  3296  	// need to wake up another P here. See "Worker thread parking/unparking"
  3297  	// comment at the top of the file for details.
  3298  	wakep()
  3299  }
  3300  
  3301  // injectglist adds each runnable G on the list to some run queue,
  3302  // and clears glist. If there is no current P, they are added to the
  3303  // global queue, and up to npidle M's are started to run them.
  3304  // Otherwise, for each idle P, this adds a G to the global queue
  3305  // and starts an M. Any remaining G's are added to the current P's
  3306  // local run queue.
  3307  // This may temporarily acquire sched.lock.
  3308  // Can run concurrently with GC.
  3309  func injectglist(glist *gList) {
  3310  	if glist.empty() {
  3311  		return
  3312  	}
  3313  	if traceEnabled() {
  3314  		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
  3315  			traceGoUnpark(gp, 0)
  3316  		}
  3317  	}
  3318  
  3319  	// Mark all the goroutines as runnable before we put them
  3320  	// on the run queues.
  3321  	head := glist.head.ptr()
  3322  	var tail *g
  3323  	qsize := 0
  3324  	for gp := head; gp != nil; gp = gp.schedlink.ptr() {
  3325  		tail = gp
  3326  		qsize++
  3327  		casgstatus(gp, _Gwaiting, _Grunnable)
  3328  	}
  3329  
  3330  	// Turn the gList into a gQueue.
  3331  	var q gQueue
  3332  	q.head.set(head)
  3333  	q.tail.set(tail)
  3334  	*glist = gList{}
  3335  
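        	// startIdle starts up to n Ms, each taking a P from the idle list in
        	// spinning state, to run the Gs that were just queued globally.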
  3336  	startIdle := func(n int) {
  3337  		for i := 0; i < n; i++ {
  3338  			mp := acquirem() // See comment in startm.
  3339  			lock(&sched.lock)
  3340  
  3341  			pp, _ := pidlegetSpinning(0)
  3342  			if pp == nil {
  3343  				unlock(&sched.lock)
  3344  				releasem(mp)
  3345  				break
  3346  			}
  3347  
  3348  			startm(pp, false, true)
  3349  			unlock(&sched.lock)
  3350  			releasem(mp)
  3351  		}
  3352  	}
  3353  
  3354  	pp := getg().m.p.ptr()
  3355  	if pp == nil {
  3356  		lock(&sched.lock)
  3357  		globrunqputbatch(&q, int32(qsize))
  3358  		unlock(&sched.lock)
  3359  		startIdle(qsize)
  3360  		return
  3361  	}
  3362  
  3363  	npidle := int(sched.npidle.Load())
  3364  	var globq gQueue
  3365  	var n int
  3366  	for n = 0; n < npidle && !q.empty(); n++ {
  3367  		g := q.pop()
  3368  		globq.pushBack(g)
  3369  	}
  3370  	if n > 0 {
  3371  		lock(&sched.lock)
  3372  		globrunqputbatch(&globq, int32(n))
  3373  		unlock(&sched.lock)
  3374  		startIdle(n)
  3375  		qsize -= n
  3376  	}
  3377  
  3378  	if !q.empty() {
  3379  		runqputbatch(pp, &q, qsize)
  3380  	}
  3381  }
  3382  
  3383  // One round of scheduler: find a runnable goroutine and execute it.
  3384  // Never returns.
  3385  func schedule() {
  3386  	mp := getg().m
  3387  
  3388  	if mp.locks != 0 {
  3389  		throw("schedule: holding locks")
  3390  	}
  3391  
  3392  	if mp.lockedg != 0 {
  3393  		stoplockedm()
  3394  		execute(mp.lockedg.ptr(), false) // Never returns.
  3395  	}
  3396  
  3397  	// We should not schedule away from a g that is executing a cgo call,
  3398  	// since the cgo call is using the m's g0 stack.
  3399  	if mp.incgo {
  3400  		throw("schedule: in cgo")
  3401  	}
  3402  
  3403  top:
  3404  	pp := mp.p.ptr()
  3405  	pp.preempt = false
  3406  
  3407  	// Safety check: if we are spinning, the run queue should be empty.
  3408  	// Check this before calling checkTimers, as that might call
  3409  	// goready to put a ready goroutine on the local run queue.
  3410  	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
  3411  		throw("schedule: spinning with local work")
  3412  	}
  3413  
  3414  	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available
  3415  
  3416  	// This thread is going to run a goroutine and is not spinning anymore,
  3417  	// so if it was marked as spinning we need to reset it now and potentially
  3418  	// start a new spinning M.
  3419  	if mp.spinning {
  3420  		resetspinning()
  3421  	}
  3422  
  3423  	if sched.disable.user && !schedEnabled(gp) {
  3424  		// Scheduling of this goroutine is disabled. Put it on
  3425  		// the list of pending runnable goroutines for when we
  3426  		// re-enable user scheduling and look again.
  3427  		lock(&sched.lock)
  3428  		if schedEnabled(gp) {
  3429  			// Something re-enabled scheduling while we
  3430  			// were acquiring the lock.
  3431  			unlock(&sched.lock)
  3432  		} else {
  3433  			sched.disable.runnable.pushBack(gp)
  3434  			sched.disable.n++
  3435  			unlock(&sched.lock)
  3436  			goto top
  3437  		}
  3438  	}
  3439  
  3440  	// If about to schedule a not-normal goroutine (a GCworker or tracereader),
  3441  	// wake a P if there is one.
  3442  	if tryWakeP {
  3443  		wakep()
  3444  	}
  3445  	if gp.lockedm != 0 {
  3446  		// Hands off own p to the locked m,
  3447  		// then blocks waiting for a new p.
  3448  		startlockedm(gp)
  3449  		goto top
  3450  	}
  3451  
  3452  	execute(gp, inheritTime)
  3453  }
  3454  
  3455  // dropg removes the association between m and the current goroutine m->curg (gp for short).
  3456  // Typically a caller sets gp's status away from Grunning and then
  3457  // immediately calls dropg to finish the job. The caller is also responsible
  3458  // for arranging that gp will be restarted using ready at an
  3459  // appropriate time. After calling dropg and arranging for gp to be
  3460  // readied later, the caller can do other work but eventually should
  3461  // call schedule to restart the scheduling of goroutines on this m.
  3462  func dropg() {
  3463  	gp := getg()
  3464  
  3465  	setMNoWB(&gp.m.curg.m, nil)
  3466  	setGNoWB(&gp.m.curg, nil)
  3467  }
  3468  
  3469  // checkTimers runs any timers for the P that are ready.
  3470  // If now is not 0 it is the current time.
  3471  // It returns the passed time or the current time if now was passed as 0,
  3472  // and the time when the next timer should run or 0 if there is no next timer,
  3473  // and reports whether it ran any timers.
  3474  // If the time when the next timer should run is not 0,
  3475  // it is always larger than the returned time.
  3476  // We pass now in and out to avoid extra calls of nanotime.
  3477  //
  3478  //go:yeswritebarrierrec
  3479  func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) {
  3480  	// If it's not yet time for the first timer, or the first adjusted
  3481  	// timer, then there is nothing to do.
  3482  	next := pp.timer0When.Load()
  3483  	nextAdj := pp.timerModifiedEarliest.Load()
  3484  	if next == 0 || (nextAdj != 0 && nextAdj < next) {
  3485  		next = nextAdj
  3486  	}
  3487  
  3488  	if next == 0 {
  3489  		// No timers to run or adjust.
  3490  		return now, 0, false
  3491  	}
  3492  
  3493  	if now == 0 {
  3494  		now = nanotime()
  3495  	}
  3496  	if now < next {
  3497  		// Next timer is not ready to run, but keep going
  3498  		// if we would clear deleted timers.
  3499  		// This corresponds to the condition below where
  3500  		// we decide whether to call clearDeletedTimers.
  3501  		if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
  3502  			return now, next, false
  3503  		}
  3504  	}
  3505  
  3506  	lock(&pp.timersLock)
  3507  
  3508  	if len(pp.timers) > 0 {
  3509  		adjusttimers(pp, now)
  3510  		for len(pp.timers) > 0 {
  3511  			// Note that runtimer may temporarily unlock
  3512  			// pp.timersLock.
  3513  			if tw := runtimer(pp, now); tw != 0 {
  3514  				if tw > 0 {
  3515  					pollUntil = tw
  3516  				}
  3517  				break
  3518  			}
  3519  			ran = true
  3520  		}
  3521  	}
  3522  
  3523  	// If this is the local P, and there are a lot of deleted timers,
  3524  	// clear them out. We only do this for the local P to reduce
  3525  	// lock contention on timersLock.
  3526  	if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 {
  3527  		clearDeletedTimers(pp)
  3528  	}
  3529  
  3530  	unlock(&pp.timersLock)
  3531  
  3532  	return now, pollUntil, ran
  3533  }
  3534  
  3535  func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  3536  	unlock((*mutex)(lock))
  3537  	return true
  3538  }
  3539  
  3540  // park continuation on g0.
  3541  func park_m(gp *g) {
  3542  	mp := getg().m
  3543  
  3544  	if traceEnabled() {
  3545  		traceGoPark(mp.waittraceev, mp.waittraceskip)
  3546  	}
  3547  
  3548  	// N.B. Not using casGToWaiting here because the waitreason is
  3549  	// set by park_m's caller.
  3550  	casgstatus(gp, _Grunning, _Gwaiting)
  3551  	dropg()
  3552  
  3553  	if fn := mp.waitunlockf; fn != nil {
  3554  		ok := fn(gp, mp.waitlock)
  3555  		mp.waitunlockf = nil
  3556  		mp.waitlock = nil
  3557  		if !ok {
  3558  			if traceEnabled() {
  3559  				traceGoUnpark(gp, 2)
  3560  			}
  3561  			casgstatus(gp, _Gwaiting, _Grunnable)
  3562  			execute(gp, true) // Schedule it back, never returns.
  3563  		}
  3564  	}
  3565  	schedule()
  3566  }
  3567  
  3568  func goschedImpl(gp *g) {
  3569  	status := readgstatus(gp)
  3570  	if status&^_Gscan != _Grunning {
  3571  		dumpgstatus(gp)
  3572  		throw("bad g status")
  3573  	}
  3574  	casgstatus(gp, _Grunning, _Grunnable)
  3575  	dropg()
  3576  	lock(&sched.lock)
  3577  	globrunqput(gp)
  3578  	unlock(&sched.lock)
  3579  
  3580  	schedule()
  3581  }
  3582  
  3583  // Gosched continuation on g0.
  3584  func gosched_m(gp *g) {
  3585  	if traceEnabled() {
  3586  		traceGoSched()
  3587  	}
  3588  	goschedImpl(gp)
  3589  }
  3590  
  3591  // goschedguarded is a forbidden-states-avoided version of gosched_m.
  3592  func goschedguarded_m(gp *g) {
  3593  
  3594  	if !canPreemptM(gp.m) {
  3595  		gogo(&gp.sched) // never return
  3596  	}
  3597  
  3598  	if traceEnabled() {
  3599  		traceGoSched()
  3600  	}
  3601  	goschedImpl(gp)
  3602  }
  3603  
  3604  func gopreempt_m(gp *g) {
  3605  	if traceEnabled() {
  3606  		traceGoPreempt()
  3607  	}
  3608  	goschedImpl(gp)
  3609  }
  3610  
  3611  // preemptPark parks gp and puts it in _Gpreempted.
  3612  //
  3613  //go:systemstack
  3614  func preemptPark(gp *g) {
  3615  	if traceEnabled() {
  3616  		traceGoPark(traceEvGoBlock, 0)
  3617  	}
  3618  	status := readgstatus(gp)
  3619  	if status&^_Gscan != _Grunning {
  3620  		dumpgstatus(gp)
  3621  		throw("bad g status")
  3622  	}
  3623  
  3624  	if gp.asyncSafePoint {
  3625  		// Double-check that async preemption does not
  3626  		// happen in SPWRITE assembly functions.
  3627  		// isAsyncSafePoint must exclude this case.
  3628  		f := findfunc(gp.sched.pc)
  3629  		if !f.valid() {
  3630  			throw("preempt at unknown pc")
  3631  		}
  3632  		if f.flag&abi.FuncFlagSPWrite != 0 {
  3633  			println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
  3634  			throw("preempt SPWRITE")
  3635  		}
  3636  	}
  3637  
  3638  	// Transition from _Grunning to _Gscan|_Gpreempted. We can't
  3639  	// be in _Grunning when we dropg because then we'd be running
  3640  	// without an M, but the moment we're in _Gpreempted,
  3641  	// something could claim this G before we've fully cleaned it
  3642  	// up. Hence, we set the scan bit to lock down further
  3643  	// transitions until we can dropg.
  3644  	casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
  3645  	dropg()
  3646  	casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
  3647  	schedule()
  3648  }
  3649  
  3650  // goyield is like Gosched, but it:
  3651  // - emits a GoPreempt trace event instead of a GoSched trace event
  3652  // - puts the current G on the runq of the current P instead of the globrunq
  3653  func goyield() {
  3654  	checkTimeouts()
  3655  	mcall(goyield_m)
  3656  }
  3657  
  3658  func goyield_m(gp *g) {
  3659  	if traceEnabled() {
  3660  		traceGoPreempt()
  3661  	}
  3662  	pp := gp.m.p.ptr()
  3663  	casgstatus(gp, _Grunning, _Grunnable)
  3664  	dropg()
  3665  	runqput(pp, gp, false)
  3666  	schedule()
  3667  }
  3668  
  3669  // Finishes execution of the current goroutine.
  3670  func goexit1() {
  3671  	if raceenabled {
  3672  		racegoend()
  3673  	}
  3674  	if traceEnabled() {
  3675  		traceGoEnd()
  3676  	}
  3677  	mcall(goexit0)
  3678  }
  3679  
  3680  // goexit continuation on g0.
  3681  func goexit0(gp *g) {
  3682  	mp := getg().m
  3683  	pp := mp.p.ptr()
  3684  
  3685  	casgstatus(gp, _Grunning, _Gdead)
  3686  	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
  3687  	if isSystemGoroutine(gp, false) {
  3688  		sched.ngsys.Add(-1)
  3689  	}
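        	// Reset the G's fields so it holds no stale pointers or state when it
        	// is recycled via gfput below.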
  3690  	gp.m = nil
  3691  	locked := gp.lockedm != 0
  3692  	gp.lockedm = 0
  3693  	mp.lockedg = 0
  3694  	gp.preemptStop = false
  3695  	gp.paniconfault = false
  3696  	gp._defer = nil // should be nil already but just in case.
  3697  	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
  3698  	gp.writebuf = nil
  3699  	gp.waitreason = waitReasonZero
  3700  	gp.param = nil
  3701  	gp.labels = nil
  3702  	gp.timer = nil
  3703  
  3704  	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
  3705  		// Flush assist credit to the global pool. This gives
  3706  		// better information to pacing if the application is
  3707  		// rapidly creating and exiting goroutines.
  3708  		assistWorkPerByte := gcController.assistWorkPerByte.Load()
  3709  		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
  3710  		gcController.bgScanCredit.Add(scanCredit)
  3711  		gp.gcAssistBytes = 0
  3712  	}
  3713  
  3714  	dropg()
  3715  
  3716  	if GOARCH == "wasm" { // no threads yet on wasm
  3717  		gfput(pp, gp)
  3718  		schedule() // never returns
  3719  	}
  3720  
  3721  	if mp.lockedInt != 0 {
  3722  		print("invalid m->lockedInt = ", mp.lockedInt, "\n")
  3723  		throw("internal lockOSThread error")
  3724  	}
  3725  	gfput(pp, gp)
  3726  	if locked {
  3727  		// The goroutine may have locked this thread because
  3728  		// it put it in an unusual kernel state. Kill it
  3729  		// rather than returning it to the thread pool.
  3730  
  3731  		// Return to mstart, which will release the P and exit
  3732  		// the thread.
  3733  		if GOOS != "plan9" { // See golang.org/issue/22227.
  3734  			gogo(&mp.g0.sched)
  3735  		} else {
  3736  			// Clear lockedExt on plan9 since we may end up re-using
  3737  			// this thread.
  3738  			mp.lockedExt = 0
  3739  		}
  3740  	}
  3741  	schedule()
  3742  }
  3743  
  3744  // save updates getg().sched to refer to pc and sp so that a following
  3745  // gogo will restore pc and sp.
  3746  //
  3747  // save must not have write barriers because invoking a write barrier
  3748  // can clobber getg().sched.
  3749  //
  3750  //go:nosplit
  3751  //go:nowritebarrierrec
  3752  func save(pc, sp uintptr) {
  3753  	gp := getg()
  3754  
  3755  	if gp == gp.m.g0 || gp == gp.m.gsignal {
  3756  		// m.g0.sched is special and must describe the context
  3757  		// for exiting the thread. mstart1 writes to it directly.
  3758  		// m.gsignal.sched should not be used at all.
  3759  		// This check makes sure save calls do not accidentally
  3760  		// run in contexts where they'd write to system g's.
  3761  		throw("save on system g not allowed")
  3762  	}
  3763  
  3764  	gp.sched.pc = pc
  3765  	gp.sched.sp = sp
  3766  	gp.sched.lr = 0
  3767  	gp.sched.ret = 0
  3768  	// We need to ensure ctxt is zero, but can't have a write
  3769  	// barrier here. However, it should always already be zero.
  3770  	// Assert that.
  3771  	if gp.sched.ctxt != nil {
  3772  		badctxt()
  3773  	}
  3774  }
  3775  
  3776  // The goroutine g is about to enter a system call.
  3777  // Record that it's not using the cpu anymore.
  3778  // This is called only from the go syscall library and cgocall,
  3779  // not from the low-level system calls used by the runtime.
  3780  //
  3781  // Entersyscall cannot split the stack: the save must
  3782  // make g->sched refer to the caller's stack segment, because
  3783  // entersyscall is going to return immediately after.
  3784  //
  3785  // Nothing entersyscall calls can split the stack either.
  3786  // We cannot safely move the stack during an active call to syscall,
  3787  // because we do not know which of the uintptr arguments are
  3788  // really pointers (back into the stack).
  3789  // In practice, this means that we make the fast path run through
  3790  // entersyscall doing no-split things, and the slow path has to use systemstack
  3791  // to run bigger things on the system stack.
  3792  //
  3793  // reentersyscall is the entry point used by cgo callbacks, where explicitly
  3794  // saved SP and PC are restored. This is needed when exitsyscall will be called
  3795  // from a function further up in the call stack than the parent, as g->syscallsp
  3796  // must always point to a valid stack frame. entersyscall below is the normal
  3797  // entry point for syscalls, which obtains the SP and PC from the caller.
  3798  //
  3799  // Syscall tracing:
  3800  // At the start of a syscall we emit traceGoSysCall to capture the stack trace.
  3801  // If the syscall does not block, that is it, we do not emit any other events.
  3802  // If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
  3803  // when syscall returns we emit traceGoSysExit and when the goroutine starts running
  3804  // (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
  3805  // To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
  3806  // we remember the current value of syscalltick in m (gp.m.syscalltick = gp.m.p.ptr().syscalltick),
  3807  // whoever emits traceGoSysBlock increments p.syscalltick afterwards;
  3808  // and we wait for the increment before emitting traceGoSysExit.
  3809  // Note that the increment is done even if tracing is not enabled,
  3810  // because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
  3811  //
  3812  //go:nosplit
  3813  func reentersyscall(pc, sp uintptr) {
  3814  	gp := getg()
  3815  
  3816  	// Disable preemption because during this function g is in Gsyscall status,
  3817  	// but can have an inconsistent g->sched; do not let the GC observe it.
  3818  	gp.m.locks++
  3819  
  3820  	// Entersyscall must not call any function that might split/grow the stack.
  3821  	// (See details in comment above.)
  3822  	// Catch calls that might, by replacing the stack guard with something that
  3823  	// will trip any stack check and leaving a flag to tell newstack to die.
  3824  	gp.stackguard0 = stackPreempt
  3825  	gp.throwsplit = true
  3826  
  3827  	// Leave SP around for GC and traceback.
  3828  	save(pc, sp)
  3829  	gp.syscallsp = sp
  3830  	gp.syscallpc = pc
  3831  	casgstatus(gp, _Grunning, _Gsyscall)
  3832  	if staticLockRanking {
  3833  		// When doing static lock ranking casgstatus can call
  3834  		// systemstack which clobbers g.sched.
  3835  		save(pc, sp)
  3836  	}
  3837  	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
  3838  		systemstack(func() {
  3839  			print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
  3840  			throw("entersyscall")
  3841  		})
  3842  	}
  3843  
  3844  	if traceEnabled() {
  3845  		systemstack(traceGoSysCall)
  3846  		// systemstack itself clobbers g.sched.{pc,sp} and we might
  3847  		// need them later when the G is genuinely blocked in a
  3848  		// syscall
  3849  		save(pc, sp)
  3850  	}
  3851  
  3852  	if sched.sysmonwait.Load() {
  3853  		systemstack(entersyscall_sysmon)
  3854  		save(pc, sp)
  3855  	}
  3856  
  3857  	if gp.m.p.ptr().runSafePointFn != 0 {
  3858  		// runSafePointFn may stack split if run on this stack
  3859  		systemstack(runSafePointFn)
  3860  		save(pc, sp)
  3861  	}
  3862  
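        	// Release the P: remember it in oldp so exitsyscall can try to
        	// reacquire it cheaply, and mark it _Psyscall so sysmon can retake it
        	// if the syscall runs for a long time.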
  3863  	gp.m.syscalltick = gp.m.p.ptr().syscalltick
  3864  	gp.sysblocktraced = true
  3865  	pp := gp.m.p.ptr()
  3866  	pp.m = 0
  3867  	gp.m.oldp.set(pp)
  3868  	gp.m.p = 0
  3869  	atomic.Store(&pp.status, _Psyscall)
  3870  	if sched.gcwaiting.Load() {
  3871  		systemstack(entersyscall_gcwait)
  3872  		save(pc, sp)
  3873  	}
  3874  
  3875  	gp.m.locks--
  3876  }
  3877  
  3878  // Standard syscall entry used by the go syscall library and normal cgo calls.
  3879  //
  3880  // This is exported via linkname to assembly in the syscall package and x/sys.
  3881  //
  3882  //go:nosplit
  3883  //go:linkname entersyscall
  3884  func entersyscall() {
  3885  	reentersyscall(getcallerpc(), getcallersp())
  3886  }
  3887  
  3888  func entersyscall_sysmon() {
  3889  	lock(&sched.lock)
  3890  	if sched.sysmonwait.Load() {
  3891  		sched.sysmonwait.Store(false)
  3892  		notewakeup(&sched.sysmonnote)
  3893  	}
  3894  	unlock(&sched.lock)
  3895  }
  3896  
  3897  func entersyscall_gcwait() {
  3898  	gp := getg()
  3899  	pp := gp.m.oldp.ptr()
  3900  
  3901  	lock(&sched.lock)
  3902  	if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
  3903  		if traceEnabled() {
  3904  			traceGoSysBlock(pp)
  3905  			traceProcStop(pp)
  3906  		}
  3907  		pp.syscalltick++
  3908  		if sched.stopwait--; sched.stopwait == 0 {
  3909  			notewakeup(&sched.stopnote)
  3910  		}
  3911  	}
  3912  	unlock(&sched.lock)
  3913  }
  3914  
  3915  // The same as entersyscall(), but with a hint that the syscall is blocking.
  3916  //
  3917  //go:nosplit
  3918  func entersyscallblock() {
  3919  	gp := getg()
  3920  
  3921  	gp.m.locks++ // see comment in entersyscall
  3922  	gp.throwsplit = true
  3923  	gp.stackguard0 = stackPreempt // see comment in entersyscall
  3924  	gp.m.syscalltick = gp.m.p.ptr().syscalltick
  3925  	gp.sysblocktraced = true
  3926  	gp.m.p.ptr().syscalltick++
  3927  
  3928  	// Leave SP around for GC and traceback.
  3929  	pc := getcallerpc()
  3930  	sp := getcallersp()
  3931  	save(pc, sp)
  3932  	gp.syscallsp = gp.sched.sp
  3933  	gp.syscallpc = gp.sched.pc
  3934  	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
  3935  		sp1 := sp
  3936  		sp2 := gp.sched.sp
  3937  		sp3 := gp.syscallsp
  3938  		systemstack(func() {
  3939  			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
  3940  			throw("entersyscallblock")
  3941  		})
  3942  	}
  3943  	casgstatus(gp, _Grunning, _Gsyscall)
  3944  	if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
  3945  		systemstack(func() {
  3946  			print("entersyscallblock inconsistent ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
  3947  			throw("entersyscallblock")
  3948  		})
  3949  	}
  3950  
  3951  	systemstack(entersyscallblock_handoff)
  3952  
  3953  	// Resave for traceback during blocked call.
  3954  	save(getcallerpc(), getcallersp())
  3955  
  3956  	gp.m.locks--
  3957  }
  3958  
  3959  func entersyscallblock_handoff() {
  3960  	if traceEnabled() {
  3961  		traceGoSysCall()
  3962  		traceGoSysBlock(getg().m.p.ptr())
  3963  	}
  3964  	handoffp(releasep())
  3965  }
  3966  
  3967  // The goroutine g exited its system call.
  3968  // Arrange for it to run on a cpu again.
  3969  // This is called only from the go syscall library, not
  3970  // from the low-level system calls used by the runtime.
  3971  //
  3972  // Write barriers are not allowed because our P may have been stolen.
  3973  //
  3974  // This is exported via linkname to assembly in the syscall package.
  3975  //
  3976  //go:nosplit
  3977  //go:nowritebarrierrec
  3978  //go:linkname exitsyscall
  3979  func exitsyscall() {
  3980  	gp := getg()
  3981  
  3982  	gp.m.locks++ // see comment in entersyscall
  3983  	if getcallersp() > gp.syscallsp {
  3984  		throw("exitsyscall: syscall frame is no longer valid")
  3985  	}
  3986  
  3987  	gp.waitsince = 0
  3988  	oldp := gp.m.oldp.ptr()
  3989  	gp.m.oldp = 0
  3990  	if exitsyscallfast(oldp) {
  3991  		// When exitsyscallfast returns success, we have a P so we can now use
  3992  		// write barriers.
  3993  		if goroutineProfile.active {
  3994  			// Make sure that gp has had its stack written out to the goroutine
  3995  			// profile, exactly as it was when the goroutine profiler first
  3996  			// stopped the world.
  3997  			systemstack(func() {
  3998  				tryRecordGoroutineProfileWB(gp)
  3999  			})
  4000  		}
  4001  		if traceEnabled() {
  4002  			if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
  4003  				systemstack(traceGoStart)
  4004  			}
  4005  		}
  4006  		// There's a cpu for us, so we can run.
  4007  		gp.m.p.ptr().syscalltick++
  4008  		// We need to cas the status and scan before resuming...
  4009  		casgstatus(gp, _Gsyscall, _Grunning)
  4010  
  4011  		// Garbage collector isn't running (since we are),
  4012  		// so okay to clear syscallsp.
  4013  		gp.syscallsp = 0
  4014  		gp.m.locks--
  4015  		if gp.preempt {
  4016  			// restore the preemption request in case we've cleared it in newstack
  4017  			gp.stackguard0 = stackPreempt
  4018  		} else {
  4019  			// otherwise restore the real stackGuard, we've spoiled it in entersyscall/entersyscallblock
  4020  			gp.stackguard0 = gp.stack.lo + stackGuard
  4021  		}
  4022  		gp.throwsplit = false
  4023  
  4024  		if sched.disable.user && !schedEnabled(gp) {
  4025  			// Scheduling of this goroutine is disabled.
  4026  			Gosched()
  4027  		}
  4028  
  4029  		return
  4030  	}
  4031  
  4032  	gp.sysexitticks = 0
  4033  	if traceEnabled() {
  4034  		// Wait till traceGoSysBlock event is emitted.
  4035  		// This ensures consistency of the trace (the goroutine is started after it is blocked).
  4036  		for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
  4037  			osyield()
  4038  		}
  4039  		// We can't trace syscall exit right now because we don't have a P.
  4040  		// Tracing code can invoke write barriers that cannot run without a P.
  4041  		// So instead we remember the syscall exit time and emit the event
  4042  		// in execute when we have a P.
  4043  		gp.sysexitticks = cputicks()
  4044  	}
  4045  
  4046  	gp.m.locks--
  4047  
  4048  	// Call the scheduler.
  4049  	mcall(exitsyscall0)
  4050  
  4051  	// Scheduler returned, so we're allowed to run now.
  4052  	// Delete the syscallsp information that we left for
  4053  	// the garbage collector during the system call.
  4054  	// Must wait until now because until gosched returns
  4055  	// we don't know for sure that the garbage collector
  4056  	// is not running.
  4057  	gp.syscallsp = 0
  4058  	gp.m.p.ptr().syscalltick++
  4059  	gp.throwsplit = false
  4060  }
  4061  
  4062  //go:nosplit
  4063  func exitsyscallfast(oldp *p) bool {
  4064  	gp := getg()
  4065  
  4066  	// Freezetheworld sets stopwait but does not retake P's.
  4067  	if sched.stopwait == freezeStopWait {
  4068  		return false
  4069  	}
  4070  
  4071  	// Try to re-acquire the last P.
  4072  	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
  4073  		// There's a cpu for us, so we can run.
  4074  		wirep(oldp)
  4075  		exitsyscallfast_reacquired()
  4076  		return true
  4077  	}
  4078  
  4079  	// Try to get any other idle P.
  4080  	if sched.pidle != 0 {
  4081  		var ok bool
  4082  		systemstack(func() {
  4083  			ok = exitsyscallfast_pidle()
  4084  			if ok && traceEnabled() {
  4085  				if oldp != nil {
  4086  					// Wait till traceGoSysBlock event is emitted.
  4087  					// This ensures consistency of the trace (the goroutine is started after it is blocked).
  4088  					for oldp.syscalltick == gp.m.syscalltick {
  4089  						osyield()
  4090  					}
  4091  				}
  4092  				traceGoSysExit(0)
  4093  			}
  4094  		})
  4095  		if ok {
  4096  			return true
  4097  		}
  4098  	}
  4099  	return false
  4100  }
  4101  
  4102  // exitsyscallfast_reacquired is the exitsyscall path on which this G
  4103  // has successfully reacquired the P it was running on before the
  4104  // syscall.
  4105  //
  4106  //go:nosplit
  4107  func exitsyscallfast_reacquired() {
  4108  	gp := getg()
  4109  	if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
  4110  		if traceEnabled() {
  4111  			// The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
  4112  			// traceGoSysBlock for this syscall was already emitted,
  4113  			// but here we effectively retake the p from the new syscall running on the same p.
  4114  			systemstack(func() {
  4115  				// Denote blocking of the new syscall.
  4116  				traceGoSysBlock(gp.m.p.ptr())
  4117  				// Denote completion of the current syscall.
  4118  				traceGoSysExit(0)
  4119  			})
  4120  		}
  4121  		gp.m.p.ptr().syscalltick++
  4122  	}
  4123  }
  4124  
  4125  func exitsyscallfast_pidle() bool {
  4126  	lock(&sched.lock)
  4127  	pp, _ := pidleget(0)
  4128  	if pp != nil && sched.sysmonwait.Load() {
  4129  		sched.sysmonwait.Store(false)
  4130  		notewakeup(&sched.sysmonnote)
  4131  	}
  4132  	unlock(&sched.lock)
  4133  	if pp != nil {
  4134  		acquirep(pp)
  4135  		return true
  4136  	}
  4137  	return false
  4138  }
  4139  
  4140  // exitsyscall slow path on g0.
  4141  // Failed to acquire P, enqueue gp as runnable.
  4142  //
  4143  // Called via mcall, so gp is the calling g from this M.
  4144  //
  4145  //go:nowritebarrierrec
  4146  func exitsyscall0(gp *g) {
  4147  	casgstatus(gp, _Gsyscall, _Grunnable)
  4148  	dropg()
  4149  	lock(&sched.lock)
  4150  	var pp *p
  4151  	if schedEnabled(gp) {
  4152  		pp, _ = pidleget(0)
  4153  	}
  4154  	var locked bool
  4155  	if pp == nil {
  4156  		globrunqput(gp)
  4157  
  4158  		// Below, we stoplockedm if gp is locked. globrunqput releases
  4159  		// ownership of gp, so we must check if gp is locked prior to
  4160  		// committing the release by unlocking sched.lock, otherwise we
  4161  		// could race with another M transitioning gp from unlocked to
  4162  		// locked.
  4163  		locked = gp.lockedm != 0
  4164  	} else if sched.sysmonwait.Load() {
  4165  		sched.sysmonwait.Store(false)
  4166  		notewakeup(&sched.sysmonnote)
  4167  	}
  4168  	unlock(&sched.lock)
  4169  	if pp != nil {
  4170  		acquirep(pp)
  4171  		execute(gp, false) // Never returns.
  4172  	}
  4173  	if locked {
  4174  		// Wait until another thread schedules gp and so m again.
  4175  		//
  4176  		// N.B. lockedm must be this M, as this g was running on this M
  4177  		// before entersyscall.
  4178  		stoplockedm()
  4179  		execute(gp, false) // Never returns.
  4180  	}
  4181  	stopm()
  4182  	schedule() // Never returns.
  4183  }
  4184  
  4185  // Called from syscall package before fork.
  4186  //
  4187  //go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
  4188  //go:nosplit
  4189  func syscall_runtime_BeforeFork() {
  4190  	gp := getg().m.curg
  4191  
  4192  	// Block signals during a fork, so that the child does not run
  4193  	// a signal handler before exec if a signal is sent to the process
  4194  	// group. See issue #18600.
  4195  	gp.m.locks++
  4196  	sigsave(&gp.m.sigmask)
  4197  	sigblock(false)
  4198  
  4199  	// This function is called before fork in syscall package.
  4200  	// Code between fork and exec must not allocate memory nor even try to grow stack.
  4201  	// Here we spoil g.stackguard0 to reliably detect any attempts to grow stack.
  4202  	// runtime_AfterFork will undo this in parent process, but not in child.
  4203  	gp.stackguard0 = stackFork
  4204  }
  4205  
  4206  // Called from syscall package after fork in parent.
  4207  //
  4208  //go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
  4209  //go:nosplit
  4210  func syscall_runtime_AfterFork() {
  4211  	gp := getg().m.curg
  4212  
  4213  	// See the comments in beforefork.
  4214  	gp.stackguard0 = gp.stack.lo + stackGuard
  4215  
  4216  	msigrestore(gp.m.sigmask)
  4217  
  4218  	gp.m.locks--
  4219  }
  4220  
  4221  // inForkedChild is true while manipulating signals in the child process.
  4222  // This is used to avoid calling libc functions in case we are using vfork.
  4223  var inForkedChild bool
  4224  
  4225  // Called from syscall package after fork in child.
  4226  // It resets non-sigignored signals to the default handler, and
  4227  // restores the signal mask in preparation for the exec.
  4228  //
  4229  // Because this might be called during a vfork, and therefore may be
  4230  // temporarily sharing address space with the parent process, this must
  4231  // not change any global variables or call into C code that may do so.
  4232  //
  4233  //go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
  4234  //go:nosplit
  4235  //go:nowritebarrierrec
  4236  func syscall_runtime_AfterForkInChild() {
  4237  	// It's OK to change the global variable inForkedChild here
  4238  	// because we are going to change it back. There is no race here,
  4239  	// because if we are sharing address space with the parent process,
  4240  	// then the parent process can not be running concurrently.
  4241  	inForkedChild = true
  4242  
  4243  	clearSignalHandlers()
  4244  
  4245  	// When we are the child we are the only thread running,
  4246  	// so we know that nothing else has changed gp.m.sigmask.
  4247  	msigrestore(getg().m.sigmask)
  4248  
  4249  	inForkedChild = false
  4250  }
  4251  
  4252  // pendingPreemptSignals is the number of preemption signals
  4253  // that have been sent but not received. This is only used on Darwin.
  4254  // For #41702.
  4255  var pendingPreemptSignals atomic.Int32
  4256  
  4257  // Called from syscall package before Exec.
  4258  //
  4259  //go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
  4260  func syscall_runtime_BeforeExec() {
  4261  	// Prevent thread creation during exec.
  4262  	execLock.lock()
  4263  
  4264  	// On Darwin, wait for all pending preemption signals to
  4265  	// be received. See issue #41702.
  4266  	if GOOS == "darwin" || GOOS == "ios" {
  4267  		for pendingPreemptSignals.Load() > 0 {
  4268  			osyield()
  4269  		}
  4270  	}
  4271  }
  4272  
  4273  // Called from syscall package after Exec.
  4274  //
  4275  //go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
  4276  func syscall_runtime_AfterExec() {
  4277  	execLock.unlock()
  4278  }
  4279  
  4280  // Allocate a new g, with a stack big enough for stacksize bytes.
  4281  func malg(stacksize int32) *g {
  4282  	newg := new(g)
  4283  	if stacksize >= 0 {
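        		// Round the request up to a power of two; stackSystem adds the
        		// extra space some platforms reserve beyond the Go stack itself.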
  4284  		stacksize = round2(stackSystem + stacksize)
  4285  		systemstack(func() {
  4286  			newg.stack = stackalloc(uint32(stacksize))
  4287  		})
  4288  		newg.stackguard0 = newg.stack.lo + stackGuard
  4289  		newg.stackguard1 = ^uintptr(0)
  4290  		// Clear the bottom word of the stack. We record g
  4291  		// there on gsignal stack during VDSO on ARM and ARM64.
  4292  		*(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
  4293  	}
  4294  	return newg
  4295  }
  4296  
  4297  // Create a new g running fn.
  4298  // Put it on the queue of g's waiting to run.
  4299  // The compiler turns a go statement into a call to this.
  4300  func newproc(fn *funcval) {
  4301  	gp := getg()
  4302  	pc := getcallerpc()
  4303  	systemstack(func() {
  4304  		newg := newproc1(fn, gp, pc)
  4305  
  4306  		pp := getg().m.p.ptr()
  4307  		runqput(pp, newg, true)
  4308  
  4309  		if mainStarted {
  4310  			wakep()
  4311  		}
  4312  	})
  4313  }
  4314  
  4315  // Create a new g in state _Grunnable, starting at fn. callerpc is the
  4316  // address of the go statement that created this. The caller is responsible
  4317  // for adding the new g to the scheduler.
  4318  func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
  4319  	if fn == nil {
  4320  		fatal("go of nil func value")
  4321  	}
  4322  
  4323  	mp := acquirem() // disable preemption because we hold M and P in local vars.
  4324  	pp := mp.p.ptr()
  4325  	newg := gfget(pp)
  4326  	if newg == nil {
  4327  		newg = malg(stackMin)
  4328  		casgstatus(newg, _Gidle, _Gdead)
  4329  		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  4330  	}
  4331  	if newg.stack.hi == 0 {
  4332  		throw("newproc1: newg missing stack")
  4333  	}
  4334  
  4335  	if readgstatus(newg) != _Gdead {
  4336  		throw("newproc1: new g is not Gdead")
  4337  	}
  4338  
  4339  	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
  4340  	totalSize = alignUp(totalSize, sys.StackAlign)
  4341  	sp := newg.stack.hi - totalSize
  4342  	spArg := sp
  4343  	if usesLR {
  4344  		// caller's LR
  4345  		*(*uintptr)(unsafe.Pointer(sp)) = 0
  4346  		prepGoExitFrame(sp)
  4347  		spArg += sys.MinFrameSize
  4348  	}
  4349  
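        	// Initialize the scheduling context: sched.pc points just past goexit
        	// so the new goroutine "returns" into goexit when fn finishes, and
        	// gostartcallfn below adjusts sched so execution begins at fn.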
  4350  	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
  4351  	newg.sched.sp = sp
  4352  	newg.stktopsp = sp
  4353  	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
  4354  	newg.sched.g = guintptr(unsafe.Pointer(newg))
  4355  	gostartcallfn(&newg.sched, fn)
  4356  	newg.parentGoid = callergp.goid
  4357  	newg.gopc = callerpc
  4358  	newg.ancestors = saveAncestors(callergp)
  4359  	newg.startpc = fn.fn
  4360  	if isSystemGoroutine(newg, false) {
  4361  		sched.ngsys.Add(1)
  4362  	} else {
  4363  		// Only user goroutines inherit pprof labels.
  4364  		if mp.curg != nil {
  4365  			newg.labels = mp.curg.labels
  4366  		}
  4367  		if goroutineProfile.active {
  4368  			// A concurrent goroutine profile is running. It should include
  4369  			// exactly the set of goroutines that were alive when the goroutine
  4370  			// profiler first stopped the world. That does not include newg, so
  4371  			// mark it as not needing a profile before transitioning it from
  4372  			// _Gdead.
  4373  			newg.goroutineProfiled.Store(goroutineProfileSatisfied)
  4374  		}
  4375  	}
  4376  	// Track initial transition?
  4377  	newg.trackingSeq = uint8(fastrand())
  4378  	if newg.trackingSeq%gTrackingPeriod == 0 {
  4379  		newg.tracking = true
  4380  	}
  4381  	casgstatus(newg, _Gdead, _Grunnable)
  4382  	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
  4383  
  4384  	if pp.goidcache == pp.goidcacheend {
  4385  		// Sched.goidgen is the last allocated id,
  4386  		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  4387  		// At startup sched.goidgen=0, so main goroutine receives goid=1.
  4388  		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
  4389  		pp.goidcache -= _GoidCacheBatch - 1
  4390  		pp.goidcacheend = pp.goidcache + _GoidCacheBatch
  4391  	}
  4392  	newg.goid = pp.goidcache
  4393  	pp.goidcache++
  4394  	if raceenabled {
  4395  		newg.racectx = racegostart(callerpc)
  4396  		if newg.labels != nil {
  4397  			// See note in proflabel.go on labelSync's role in synchronizing
  4398  			// with the reads in the signal handler.
  4399  			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
  4400  		}
  4401  	}
  4402  	if traceEnabled() {
  4403  		traceGoCreate(newg, newg.startpc)
  4404  	}
  4405  	releasem(mp)
  4406  
  4407  	return newg
  4408  }
  4409  
  4410  // saveAncestors copies previous ancestors of the given caller g and
  4411  // includes info for the current caller into a new set of tracebacks for
  4412  // a g being created.
  4413  func saveAncestors(callergp *g) *[]ancestorInfo {
  4414  	// Copy all prior info, except for the root goroutine (goid 0).
  4415  	if debug.tracebackancestors <= 0 || callergp.goid == 0 {
  4416  		return nil
  4417  	}
  4418  	var callerAncestors []ancestorInfo
  4419  	if callergp.ancestors != nil {
  4420  		callerAncestors = *callergp.ancestors
  4421  	}
  4422  	n := int32(len(callerAncestors)) + 1
  4423  	if n > debug.tracebackancestors {
  4424  		n = debug.tracebackancestors
  4425  	}
  4426  	ancestors := make([]ancestorInfo, n)
  4427  	copy(ancestors[1:], callerAncestors)
  4428  
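        	// Slot 0 is reserved for the current caller's own traceback, filled in
        	// below; ancestors beyond the configured limit fall off the end.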
  4429  	var pcs [tracebackInnerFrames]uintptr
  4430  	npcs := gcallers(callergp, 0, pcs[:])
  4431  	ipcs := make([]uintptr, npcs)
  4432  	copy(ipcs, pcs[:])
  4433  	ancestors[0] = ancestorInfo{
  4434  		pcs:  ipcs,
  4435  		goid: callergp.goid,
  4436  		gopc: callergp.gopc,
  4437  	}
  4438  
  4439  	ancestorsp := new([]ancestorInfo)
  4440  	*ancestorsp = ancestors
  4441  	return ancestorsp
  4442  }
  4443  
  4444  // Put on gfree list.
  4445  // If local list is too long, transfer a batch to the global list.
  4446  func gfput(pp *p, gp *g) {
  4447  	if readgstatus(gp) != _Gdead {
  4448  		throw("gfput: bad status (not Gdead)")
  4449  	}
  4450  
  4451  	stksize := gp.stack.hi - gp.stack.lo
  4452  
  4453  	if stksize != uintptr(startingStackSize) {
  4454  		// non-standard stack size - free it.
  4455  		stackfree(gp.stack)
  4456  		gp.stack.lo = 0
  4457  		gp.stack.hi = 0
  4458  		gp.stackguard0 = 0
  4459  	}
  4460  
  4461  	pp.gFree.push(gp)
  4462  	pp.gFree.n++
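        	// Once the local free list gets long, push a batch to the global free
        	// list (keeping roughly 32 locally) so other Ps can reuse these Gs.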
  4463  	if pp.gFree.n >= 64 {
  4464  		var (
  4465  			inc      int32
  4466  			stackQ   gQueue
  4467  			noStackQ gQueue
  4468  		)
  4469  		for pp.gFree.n >= 32 {
  4470  			gp := pp.gFree.pop()
  4471  			pp.gFree.n--
  4472  			if gp.stack.lo == 0 {
  4473  				noStackQ.push(gp)
  4474  			} else {
  4475  				stackQ.push(gp)
  4476  			}
  4477  			inc++
  4478  		}
  4479  		lock(&sched.gFree.lock)
  4480  		sched.gFree.noStack.pushAll(noStackQ)
  4481  		sched.gFree.stack.pushAll(stackQ)
  4482  		sched.gFree.n += inc
  4483  		unlock(&sched.gFree.lock)
  4484  	}
  4485  }
  4486  
  4487  // Get from gfree list.
  4488  // If local list is empty, grab a batch from global list.
  4489  func gfget(pp *p) *g {
  4490  retry:
  4491  	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
  4492  		lock(&sched.gFree.lock)
  4493  		// Move a batch of free Gs to the P.
  4494  		for pp.gFree.n < 32 {
  4495  			// Prefer Gs with stacks.
  4496  			gp := sched.gFree.stack.pop()
  4497  			if gp == nil {
  4498  				gp = sched.gFree.noStack.pop()
  4499  				if gp == nil {
  4500  					break
  4501  				}
  4502  			}
  4503  			sched.gFree.n--
  4504  			pp.gFree.push(gp)
  4505  			pp.gFree.n++
  4506  		}
  4507  		unlock(&sched.gFree.lock)
  4508  		goto retry
  4509  	}
  4510  	gp := pp.gFree.pop()
  4511  	if gp == nil {
  4512  		return nil
  4513  	}
  4514  	pp.gFree.n--
  4515  	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
  4516  		// Deallocate old stack. We kept it in gfput because it was the
  4517  		// right size when the goroutine was put on the free list, but
  4518  		// the right size has changed since then.
  4519  		systemstack(func() {
  4520  			stackfree(gp.stack)
  4521  			gp.stack.lo = 0
  4522  			gp.stack.hi = 0
  4523  			gp.stackguard0 = 0
  4524  		})
  4525  	}
  4526  	if gp.stack.lo == 0 {
  4527  		// Stack was deallocated in gfput or just above. Allocate a new one.
  4528  		systemstack(func() {
  4529  			gp.stack = stackalloc(startingStackSize)
  4530  		})
  4531  		gp.stackguard0 = gp.stack.lo + stackGuard
  4532  	} else {
  4533  		if raceenabled {
  4534  			racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  4535  		}
  4536  		if msanenabled {
  4537  			msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  4538  		}
  4539  		if asanenabled {
  4540  			asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
  4541  		}
  4542  	}
  4543  	return gp
  4544  }
  4545  
  4546  // Purge all cached G's from gfree list to the global list.
  4547  func gfpurge(pp *p) {
  4548  	var (
  4549  		inc      int32
  4550  		stackQ   gQueue
  4551  		noStackQ gQueue
  4552  	)
  4553  	for !pp.gFree.empty() {
  4554  		gp := pp.gFree.pop()
  4555  		pp.gFree.n--
  4556  		if gp.stack.lo == 0 {
  4557  			noStackQ.push(gp)
  4558  		} else {
  4559  			stackQ.push(gp)
  4560  		}
  4561  		inc++
  4562  	}
  4563  	lock(&sched.gFree.lock)
  4564  	sched.gFree.noStack.pushAll(noStackQ)
  4565  	sched.gFree.stack.pushAll(stackQ)
  4566  	sched.gFree.n += inc
  4567  	unlock(&sched.gFree.lock)
  4568  }
  4569  
  4570  // Breakpoint executes a breakpoint trap.
  4571  func Breakpoint() {
  4572  	breakpoint()
  4573  }
  4574  
  4575  // dolockOSThread is called by LockOSThread and lockOSThread below
  4576  // after they modify m.locked. Do not allow preemption during this call,
  4577  // or else the m might be different in this function than in the caller.
  4578  //
  4579  //go:nosplit
  4580  func dolockOSThread() {
  4581  	if GOARCH == "wasm" {
  4582  		return // no threads on wasm yet
  4583  	}
  4584  	gp := getg()
  4585  	gp.m.lockedg.set(gp)
  4586  	gp.lockedm.set(gp.m)
  4587  }
  4588  
  4589  // LockOSThread wires the calling goroutine to its current operating system thread.
  4590  // The calling goroutine will always execute in that thread,
  4591  // and no other goroutine will execute in it,
  4592  // until the calling goroutine has made as many calls to
  4593  // UnlockOSThread as to LockOSThread.
  4594  // If the calling goroutine exits without unlocking the thread,
  4595  // the thread will be terminated.
  4596  //
  4597  // All init functions are run on the startup thread. Calling LockOSThread
  4598  // from an init function will cause the main function to be invoked on
  4599  // that thread.
  4600  //
  4601  // A goroutine should call LockOSThread before calling OS services or
  4602  // non-Go library functions that depend on per-thread state.
  4603  //
  4604  //go:nosplit
  4605  func LockOSThread() {
  4606  	if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
  4607  		// If we need to start a new thread from the locked
  4608  		// thread, we need the template thread. Start it now
  4609  		// while we're in a known-good state.
  4610  		startTemplateThread()
  4611  	}
  4612  	gp := getg()
  4613  	gp.m.lockedExt++
  4614  	if gp.m.lockedExt == 0 {
  4615  		gp.m.lockedExt--
  4616  		panic("LockOSThread nesting overflow")
  4617  	}
  4618  	dolockOSThread()
  4619  }
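
        // Illustrative sketch, not part of the runtime: as the doc comment above notes,
        // calling LockOSThread from an init function pins the main goroutine to the
        // startup thread before main runs, the usual idiom for libraries that must be
        // driven from the process's first thread:
        //
        //	package main
        //
        //	import "runtime"
        //
        //	func init() {
        //		// Lock before main starts so main itself runs on the startup thread.
        //		runtime.LockOSThread()
        //	}
        //
        //	func main() {
        //		// Safe to call into a main-thread-only library from here.
        //	}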
  4620  
  4621  //go:nosplit
  4622  func lockOSThread() {
  4623  	getg().m.lockedInt++
  4624  	dolockOSThread()
  4625  }
  4626  
  4627  // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
  4628  // after they update m->locked. Do not allow preemption during this call,
  4629  // or else the m might be different in this function than in the caller.
  4630  //
  4631  //go:nosplit
  4632  func dounlockOSThread() {
  4633  	if GOARCH == "wasm" {
  4634  		return // no threads on wasm yet
  4635  	}
  4636  	gp := getg()
  4637  	if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
  4638  		return
  4639  	}
  4640  	gp.m.lockedg = 0
  4641  	gp.lockedm = 0
  4642  }
  4643  
  4644  // UnlockOSThread undoes an earlier call to LockOSThread.
  4645  // If this drops the number of active LockOSThread calls on the
  4646  // calling goroutine to zero, it unwires the calling goroutine from
  4647  // its fixed operating system thread.
  4648  // If there are no active LockOSThread calls, this is a no-op.
  4649  //
  4650  // Before calling UnlockOSThread, the caller must ensure that the OS
  4651  // thread is suitable for running other goroutines. If the caller made
  4652  // any permanent changes to the state of the thread that would affect
  4653  // other goroutines, it should not call this function and thus leave
  4654  // the goroutine locked to the OS thread until the goroutine (and
  4655  // hence the thread) exits.
  4656  //
  4657  //go:nosplit
  4658  func UnlockOSThread() {
  4659  	gp := getg()
  4660  	if gp.m.lockedExt == 0 {
  4661  		return
  4662  	}
  4663  	gp.m.lockedExt--
  4664  	dounlockOSThread()
  4665  }
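
        // Illustrative sketch, not part of the runtime: the common pattern is to pair
        // LockOSThread with a deferred UnlockOSThread around work that depends on
        // per-thread OS state (the function name below is made up for illustration):
        //
        //	func withPinnedThread() {
        //		runtime.LockOSThread()
        //		defer runtime.UnlockOSThread()
        //		// ... call OS services or non-Go code that relies on per-thread
        //		// state (thread-local storage, thread credentials, ...) ...
        //	}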
  4666  
  4667  //go:nosplit
  4668  func unlockOSThread() {
  4669  	gp := getg()
  4670  	if gp.m.lockedInt == 0 {
  4671  		systemstack(badunlockosthread)
  4672  	}
  4673  	gp.m.lockedInt--
  4674  	dounlockOSThread()
  4675  }
  4676  
  4677  func badunlockosthread() {
  4678  	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
  4679  }
  4680  
  4681  func gcount() int32 {
  4682  	n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.n - sched.ngsys.Load()
  4683  	for _, pp := range allp {
  4684  		n -= pp.gFree.n
  4685  	}
  4686  
  4687  	// All these variables can be changed concurrently, so the result can be inconsistent.
  4688  	// But at least the current goroutine is running.
  4689  	if n < 1 {
  4690  		n = 1
  4691  	}
  4692  	return n
  4693  }
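
        // Illustrative sketch, not part of the runtime: gcount backs the exported
        // runtime.NumGoroutine, so the approximate count is observable from user code:
        //
        //	package main
        //
        //	import (
        //		"fmt"
        //		"runtime"
        //	)
        //
        //	func main() {
        //		for i := 0; i < 10; i++ {
        //			go func() { select {} }() // goroutines that block forever
        //		}
        //		fmt.Println(runtime.NumGoroutine()) // typically 11: main plus the 10 above
        //	}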
  4694  
  4695  func mcount() int32 {
  4696  	return int32(sched.mnext - sched.nmfreed)
  4697  }
  4698  
  4699  var prof struct {
  4700  	signalLock atomic.Uint32
  4701  
  4702  	// Must hold signalLock to write. Reads may be lock-free, but
  4703  	// signalLock should be taken to synchronize with changes.
  4704  	hz atomic.Int32
  4705  }
  4706  
  4707  func _System()                    { _System() }
  4708  func _ExternalCode()              { _ExternalCode() }
  4709  func _LostExternalCode()          { _LostExternalCode() }
  4710  func _GC()                        { _GC() }
  4711  func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
  4712  func _VDSO()                      { _VDSO() }
  4713  
  4714  // Called if we receive a SIGPROF signal.
  4715  // Called by the signal handler, may run during STW.
  4716  //
  4717  //go:nowritebarrierrec
  4718  func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
  4719  	if prof.hz.Load() == 0 {
  4720  		return
  4721  	}
  4722  
  4723  	// If mp.profilehz is 0, then profiling is not enabled for this thread.
  4724  	// We must check this to avoid a deadlock between setcpuprofilerate
  4725  	// and the call to cpuprof.add, below.
  4726  	if mp != nil && mp.profilehz == 0 {
  4727  		return
  4728  	}
  4729  
  4730  	// On mips{,le}/arm, 64bit atomics are emulated with spinlocks, in
  4731  	// runtime/internal/atomic. If SIGPROF arrives while the program is inside
  4732  	// the critical section, it creates a deadlock (when writing the sample).
  4733  	// As a workaround, count the SIGPROFs that arrive while in the critical
  4734  	// section and report them later, when a SIGPROF is received from somewhere
  4735  	// else (attributed to _LostSIGPROFDuringAtomic64 as the pc).
  4736  	if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
  4737  		if f := findfunc(pc); f.valid() {
  4738  			if hasPrefix(funcname(f), "runtime/internal/atomic") {
  4739  				cpuprof.lostAtomic++
  4740  				return
  4741  			}
  4742  		}
  4743  		if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
  4744  			// runtime/internal/atomic functions call into kernel
  4745  			// helpers on arm < 7. See
  4746  			// runtime/internal/atomic/sys_linux_arm.s.
  4747  			cpuprof.lostAtomic++
  4748  			return
  4749  		}
  4750  	}
  4751  
  4752  	// Profiling runs concurrently with GC, so it must not allocate.
  4753  	// Set a trap in case the code does allocate.
  4754  	// Note that on windows, one thread takes profiles of all the
  4755  	// other threads, so mp is usually not getg().m.
  4756  	// In fact mp may not even be stopped.
  4757  	// See golang.org/issue/17165.
  4758  	getg().m.mallocing++
  4759  
  4760  	var u unwinder
  4761  	var stk [maxCPUProfStack]uintptr
  4762  	n := 0
  4763  	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
  4764  		cgoOff := 0
  4765  		// Check cgoCallersUse to make sure that we are not
  4766  		// interrupting other code that is fiddling with
  4767  		// cgoCallers.  We are running in a signal handler
  4768  		// with all signals blocked, so we don't have to worry
  4769  		// about any other code interrupting us.
  4770  		if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
  4771  			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
  4772  				cgoOff++
  4773  			}
  4774  			n += copy(stk[:], mp.cgoCallers[:cgoOff])
  4775  			mp.cgoCallers[0] = 0
  4776  		}
  4777  
  4778  		// Collect Go stack that leads to the cgo call.
  4779  		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
  4780  	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
  4781  		// Libcall, i.e. runtime syscall on windows.
  4782  		// Collect Go stack that leads to the call.
  4783  		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
  4784  	} else if mp != nil && mp.vdsoSP != 0 {
  4785  		// VDSO call, e.g. nanotime1 on Linux.
  4786  		// Collect Go stack that leads to the call.
  4787  		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
  4788  	} else {
  4789  		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
  4790  	}
  4791  	n += tracebackPCs(&u, 0, stk[n:])
  4792  
  4793  	if n <= 0 {
  4794  		// Normal traceback is impossible or has failed.
  4795  		// Account it against abstract "System" or "GC".
  4796  		n = 2
  4797  		if inVDSOPage(pc) {
  4798  			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
  4799  		} else if pc > firstmoduledata.etext {
  4800  			// "ExternalCode" is better than "etext".
  4801  			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
  4802  		}
  4803  		stk[0] = pc
  4804  		if mp.preemptoff != "" {
  4805  			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
  4806  		} else {
  4807  			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
  4808  		}
  4809  	}
  4810  
  4811  	if prof.hz.Load() != 0 {
  4812  		// Note: it can happen on Windows that we interrupted a system thread
  4813  		// with no g, so gp could be nil. The other nil checks are done out of
  4814  		// caution, but not expected to be nil in practice.
  4815  		var tagPtr *unsafe.Pointer
  4816  		if gp != nil && gp.m != nil && gp.m.curg != nil {
  4817  			tagPtr = &gp.m.curg.labels
  4818  		}
  4819  		cpuprof.add(tagPtr, stk[:n])
  4820  
  4821  		gprof := gp
  4822  		var pp *p
  4823  		if gp != nil && gp.m != nil {
  4824  			if gp.m.curg != nil {
  4825  				gprof = gp.m.curg
  4826  			}
  4827  			pp = gp.m.p.ptr()
  4828  		}
  4829  		traceCPUSample(gprof, pp, stk[:n])
  4830  	}
  4831  	getg().m.mallocing--
  4832  }
  4833  
  4834  // setcpuprofilerate sets the CPU profiling rate to hz times per second.
  4835  // If hz <= 0, setcpuprofilerate turns off CPU profiling.
  4836  func setcpuprofilerate(hz int32) {
  4837  	// Force sane arguments.
  4838  	if hz < 0 {
  4839  		hz = 0
  4840  	}
  4841  
  4842  	// Disable preemption, otherwise we can be rescheduled to another thread
  4843  	// that has profiling enabled.
  4844  	gp := getg()
  4845  	gp.m.locks++
  4846  
  4847  	// Stop profiler on this thread so that it is safe to lock prof.
  4848  	// if a profiling signal came in while we had prof locked,
  4849  	// it would deadlock.
  4850  	setThreadCPUProfiler(0)
  4851  
  4852  	for !prof.signalLock.CompareAndSwap(0, 1) {
  4853  		osyield()
  4854  	}
  4855  	if prof.hz.Load() != hz {
  4856  		setProcessCPUProfiler(hz)
  4857  		prof.hz.Store(hz)
  4858  	}
  4859  	prof.signalLock.Store(0)
  4860  
  4861  	lock(&sched.lock)
  4862  	sched.profilehz = hz
  4863  	unlock(&sched.lock)
  4864  
  4865  	if hz != 0 {
  4866  		setThreadCPUProfiler(hz)
  4867  	}
  4868  
  4869  	gp.m.locks--
  4870  }
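
        // Illustrative sketch, not part of the runtime: setcpuprofilerate is normally
        // reached through runtime/pprof (via runtime.SetCPUProfileRate), e.g.:
        //
        //	package main
        //
        //	import (
        //		"os"
        //		"runtime/pprof"
        //	)
        //
        //	func main() {
        //		f, err := os.Create("cpu.pprof")
        //		if err != nil {
        //			panic(err)
        //		}
        //		defer f.Close()
        //		if err := pprof.StartCPUProfile(f); err != nil { // turns SIGPROF delivery on
        //			panic(err)
        //		}
        //		defer pprof.StopCPUProfile() // sets the rate back to 0
        //
        //		// ... workload to profile ...
        //	}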
  4871  
  4872  // init initializes pp, which may be a freshly allocated p or a
  4873  // previously destroyed p, and transitions it to status _Pgcstop.
  4874  func (pp *p) init(id int32) {
  4875  	pp.id = id
  4876  	pp.status = _Pgcstop
  4877  	pp.sudogcache = pp.sudogbuf[:0]
  4878  	pp.deferpool = pp.deferpoolbuf[:0]
  4879  	pp.wbBuf.reset()
  4880  	if pp.mcache == nil {
  4881  		if id == 0 {
  4882  			if mcache0 == nil {
  4883  				throw("missing mcache?")
  4884  			}
  4885  			// Use the bootstrap mcache0. Only one P will get
  4886  			// mcache0: the one with ID 0.
  4887  			pp.mcache = mcache0
  4888  		} else {
  4889  			pp.mcache = allocmcache()
  4890  		}
  4891  	}
  4892  	if raceenabled && pp.raceprocctx == 0 {
  4893  		if id == 0 {
  4894  			pp.raceprocctx = raceprocctx0
  4895  			raceprocctx0 = 0 // bootstrap
  4896  		} else {
  4897  			pp.raceprocctx = raceproccreate()
  4898  		}
  4899  	}
  4900  	lockInit(&pp.timersLock, lockRankTimers)
  4901  
  4902  	// This P may get timers when it starts running. Set the mask here
  4903  	// since the P may not go through pidleget (notably P 0 on startup).
  4904  	timerpMask.set(id)
  4905  	// Similarly, we may not go through pidleget before this P starts
  4906  	// running if it is P 0 on startup.
  4907  	idlepMask.clear(id)
  4908  }
  4909  
  4910  // destroy releases all of the resources associated with pp and
  4911  // transitions it to status _Pdead.
  4912  //
  4913  // sched.lock must be held and the world must be stopped.
  4914  func (pp *p) destroy() {
  4915  	assertLockHeld(&sched.lock)
  4916  	assertWorldStopped()
  4917  
  4918  	// Move all runnable goroutines to the global queue
  4919  	for pp.runqhead != pp.runqtail {
  4920  		// Pop from tail of local queue
  4921  		pp.runqtail--
  4922  		gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
  4923  		// Push onto head of global queue
  4924  		globrunqputhead(gp)
  4925  	}
  4926  	if pp.runnext != 0 {
  4927  		globrunqputhead(pp.runnext.ptr())
  4928  		pp.runnext = 0
  4929  	}
  4930  	if len(pp.timers) > 0 {
  4931  		plocal := getg().m.p.ptr()
  4932  		// The world is stopped, but we acquire timersLock to
  4933  		// protect against sysmon calling timeSleepUntil.
  4934  		// This is the only case where we hold the timersLock of
  4935  		// more than one P, so there are no deadlock concerns.
  4936  		lock(&plocal.timersLock)
  4937  		lock(&pp.timersLock)
  4938  		moveTimers(plocal, pp.timers)
  4939  		pp.timers = nil
  4940  		pp.numTimers.Store(0)
  4941  		pp.deletedTimers.Store(0)
  4942  		pp.timer0When.Store(0)
  4943  		unlock(&pp.timersLock)
  4944  		unlock(&plocal.timersLock)
  4945  	}
  4946  	// Flush p's write barrier buffer.
  4947  	if gcphase != _GCoff {
  4948  		wbBufFlush1(pp)
  4949  		pp.gcw.dispose()
  4950  	}
  4951  	for i := range pp.sudogbuf {
  4952  		pp.sudogbuf[i] = nil
  4953  	}
  4954  	pp.sudogcache = pp.sudogbuf[:0]
  4955  	for j := range pp.deferpoolbuf {
  4956  		pp.deferpoolbuf[j] = nil
  4957  	}
  4958  	pp.deferpool = pp.deferpoolbuf[:0]
  4959  	systemstack(func() {
  4960  		for i := 0; i < pp.mspancache.len; i++ {
  4961  			// Safe to call since the world is stopped.
  4962  			mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
  4963  		}
  4964  		pp.mspancache.len = 0
  4965  		lock(&mheap_.lock)
  4966  		pp.pcache.flush(&mheap_.pages)
  4967  		unlock(&mheap_.lock)
  4968  	})
  4969  	freemcache(pp.mcache)
  4970  	pp.mcache = nil
  4971  	gfpurge(pp)
  4972  	traceProcFree(pp)
  4973  	if raceenabled {
  4974  		if pp.timerRaceCtx != 0 {
  4975  			// The race detector code uses a callback to fetch
  4976  			// the proc context, so arrange for that callback
  4977  			// to see the right thing.
  4978  			// This hack only works because we are the only
  4979  			// thread running.
  4980  			mp := getg().m
  4981  			phold := mp.p.ptr()
  4982  			mp.p.set(pp)
  4983  
  4984  			racectxend(pp.timerRaceCtx)
  4985  			pp.timerRaceCtx = 0
  4986  
  4987  			mp.p.set(phold)
  4988  		}
  4989  		raceprocdestroy(pp.raceprocctx)
  4990  		pp.raceprocctx = 0
  4991  	}
  4992  	pp.gcAssistTime = 0
  4993  	pp.status = _Pdead
  4994  }
  4995  
  4996  // Change number of processors.
  4997  //
  4998  // sched.lock must be held, and the world must be stopped.
  4999  //
  5000  // gcworkbufs must not be being modified by either the GC or the write barrier
  5001  // code, so the GC must not be running if the number of Ps actually changes.
  5002  //
  5003  // Returns list of Ps with local work, they need to be scheduled by the caller.
  5004  func procresize(nprocs int32) *p {
  5005  	assertLockHeld(&sched.lock)
  5006  	assertWorldStopped()
  5007  
  5008  	old := gomaxprocs
  5009  	if old < 0 || nprocs <= 0 {
  5010  		throw("procresize: invalid arg")
  5011  	}
  5012  	if traceEnabled() {
  5013  		traceGomaxprocs(nprocs)
  5014  	}
  5015  
  5016  	// update statistics
  5017  	now := nanotime()
  5018  	if sched.procresizetime != 0 {
  5019  		sched.totaltime += int64(old) * (now - sched.procresizetime)
  5020  	}
  5021  	sched.procresizetime = now
  5022  
  5023  	maskWords := (nprocs + 31) / 32
  5024  
  5025  	// Grow allp if necessary.
  5026  	if nprocs > int32(len(allp)) {
  5027  		// Synchronize with retake, which could be running
  5028  		// concurrently since it doesn't run on a P.
  5029  		lock(&allpLock)
  5030  		if nprocs <= int32(cap(allp)) {
  5031  			allp = allp[:nprocs]
  5032  		} else {
  5033  			nallp := make([]*p, nprocs)
  5034  			// Copy everything up to allp's cap so we
  5035  			// never lose old allocated Ps.
  5036  			copy(nallp, allp[:cap(allp)])
  5037  			allp = nallp
  5038  		}
  5039  
  5040  		if maskWords <= int32(cap(idlepMask)) {
  5041  			idlepMask = idlepMask[:maskWords]
  5042  			timerpMask = timerpMask[:maskWords]
  5043  		} else {
  5044  			nidlepMask := make([]uint32, maskWords)
  5045  			// No need to copy beyond len, old Ps are irrelevant.
  5046  			copy(nidlepMask, idlepMask)
  5047  			idlepMask = nidlepMask
  5048  
  5049  			ntimerpMask := make([]uint32, maskWords)
  5050  			copy(ntimerpMask, timerpMask)
  5051  			timerpMask = ntimerpMask
  5052  		}
  5053  		unlock(&allpLock)
  5054  	}
  5055  
  5056  	// initialize new P's
  5057  	for i := old; i < nprocs; i++ {
  5058  		pp := allp[i]
  5059  		if pp == nil {
  5060  			pp = new(p)
  5061  		}
  5062  		pp.init(i)
  5063  		atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
  5064  	}
  5065  
  5066  	gp := getg()
  5067  	if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
  5068  		// continue to use the current P
  5069  		gp.m.p.ptr().status = _Prunning
  5070  		gp.m.p.ptr().mcache.prepareForSweep()
  5071  	} else {
  5072  		// release the current P and acquire allp[0].
  5073  		//
  5074  		// We must do this before destroying our current P
  5075  		// because p.destroy itself has write barriers, so we
  5076  		// need to do that from a valid P.
  5077  		if gp.m.p != 0 {
  5078  			if traceEnabled() {
  5079  				// Pretend that we were descheduled
  5080  				// and then scheduled again to keep
  5081  				// the trace sane.
  5082  				traceGoSched()
  5083  				traceProcStop(gp.m.p.ptr())
  5084  			}
  5085  			gp.m.p.ptr().m = 0
  5086  		}
  5087  		gp.m.p = 0
  5088  		pp := allp[0]
  5089  		pp.m = 0
  5090  		pp.status = _Pidle
  5091  		acquirep(pp)
  5092  		if traceEnabled() {
  5093  			traceGoStart()
  5094  		}
  5095  	}
  5096  
  5097  	// g.m.p is now set, so we no longer need mcache0 for bootstrapping.
  5098  	mcache0 = nil
  5099  
  5100  	// release resources from unused P's
  5101  	for i := nprocs; i < old; i++ {
  5102  		pp := allp[i]
  5103  		pp.destroy()
  5104  		// can't free P itself because it can be referenced by an M in syscall
  5105  	}
  5106  
  5107  	// Trim allp.
  5108  	if int32(len(allp)) != nprocs {
  5109  		lock(&allpLock)
  5110  		allp = allp[:nprocs]
  5111  		idlepMask = idlepMask[:maskWords]
  5112  		timerpMask = timerpMask[:maskWords]
  5113  		unlock(&allpLock)
  5114  	}
  5115  
  5116  	var runnablePs *p
  5117  	for i := nprocs - 1; i >= 0; i-- {
  5118  		pp := allp[i]
  5119  		if gp.m.p.ptr() == pp {
  5120  			continue
  5121  		}
  5122  		pp.status = _Pidle
  5123  		if runqempty(pp) {
  5124  			pidleput(pp, now)
  5125  		} else {
  5126  			pp.m.set(mget())
  5127  			pp.link.set(runnablePs)
  5128  			runnablePs = pp
  5129  		}
  5130  	}
  5131  	stealOrder.reset(uint32(nprocs))
  5132  	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
  5133  	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
  5134  	if old != nprocs {
  5135  		// Notify the limiter that the amount of procs has changed.
  5136  		gcCPULimiter.resetCapacity(now, nprocs)
  5137  	}
  5138  	return runnablePs
  5139  }
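
        // Illustrative sketch, not part of the runtime: procresize is driven by the
        // exported runtime.GOMAXPROCS, which stops the world around the resize:
        //
        //	package main
        //
        //	import (
        //		"fmt"
        //		"runtime"
        //	)
        //
        //	func main() {
        //		prev := runtime.GOMAXPROCS(4)                           // request 4 Ps, get the old setting back
        //		fmt.Println("was", prev, "now", runtime.GOMAXPROCS(0))  // 0 only queries, never resizes
        //	}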
  5140  
  5141  // Associate p and the current m.
  5142  //
  5143  // This function is allowed to have write barriers even if the caller
  5144  // isn't because it immediately acquires pp.
  5145  //
  5146  //go:yeswritebarrierrec
  5147  func acquirep(pp *p) {
  5148  	// Do the part that isn't allowed to have write barriers.
  5149  	wirep(pp)
  5150  
  5151  	// Have p; write barriers now allowed.
  5152  
  5153  	// Perform deferred mcache flush before this P can allocate
  5154  	// from a potentially stale mcache.
  5155  	pp.mcache.prepareForSweep()
  5156  
  5157  	if traceEnabled() {
  5158  		traceProcStart()
  5159  	}
  5160  }
  5161  
  5162  // wirep is the first step of acquirep, which actually associates the
  5163  // current M to pp. This is broken out so we can disallow write
  5164  // barriers for this part, since we don't yet have a P.
  5165  //
  5166  //go:nowritebarrierrec
  5167  //go:nosplit
  5168  func wirep(pp *p) {
  5169  	gp := getg()
  5170  
  5171  	if gp.m.p != 0 {
  5172  		throw("wirep: already in go")
  5173  	}
  5174  	if pp.m != 0 || pp.status != _Pidle {
  5175  		id := int64(0)
  5176  		if pp.m != 0 {
  5177  			id = pp.m.ptr().id
  5178  		}
  5179  		print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
  5180  		throw("wirep: invalid p state")
  5181  	}
  5182  	gp.m.p.set(pp)
  5183  	pp.m.set(gp.m)
  5184  	pp.status = _Prunning
  5185  }
  5186  
  5187  // Disassociate p and the current m.
  5188  func releasep() *p {
  5189  	gp := getg()
  5190  
  5191  	if gp.m.p == 0 {
  5192  		throw("releasep: invalid arg")
  5193  	}
  5194  	pp := gp.m.p.ptr()
  5195  	if pp.m.ptr() != gp.m || pp.status != _Prunning {
  5196  		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
  5197  		throw("releasep: invalid p state")
  5198  	}
  5199  	if traceEnabled() {
  5200  		traceProcStop(gp.m.p.ptr())
  5201  	}
  5202  	gp.m.p = 0
  5203  	pp.m = 0
  5204  	pp.status = _Pidle
  5205  	return pp
  5206  }
  5207  
  5208  func incidlelocked(v int32) {
  5209  	lock(&sched.lock)
  5210  	sched.nmidlelocked += v
  5211  	if v > 0 {
  5212  		checkdead()
  5213  	}
  5214  	unlock(&sched.lock)
  5215  }
  5216  
  5217  // Check for deadlock situation.
  5218  // The check is based on number of running M's, if 0 -> deadlock.
  5219  // sched.lock must be held.
  5220  func checkdead() {
  5221  	assertLockHeld(&sched.lock)
  5222  
  5223  	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
  5224  	// there are no running goroutines. The calling program is
  5225  	// assumed to be running.
  5226  	if islibrary || isarchive {
  5227  		return
  5228  	}
  5229  
  5230  	// If we are dying because of a signal caught on an already idle thread,
  5231  	// freezetheworld will cause all running threads to block.
  5232  	// And runtime will essentially enter into deadlock state,
  5233  	// except that there is a thread that will call exit soon.
  5234  	if panicking.Load() > 0 {
  5235  		return
  5236  	}
  5237  
  5238  	// If we are not running under cgo, but we have an extra M then account
  5239  	// for it. (It is possible to have an extra M on Windows without cgo to
  5240  	// accommodate callbacks created by syscall.NewCallback. See issue #6751
  5241  	// for details.)
  5242  	var run0 int32
  5243  	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
  5244  		run0 = 1
  5245  	}
  5246  
  5247  	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
  5248  	if run > run0 {
  5249  		return
  5250  	}
  5251  	if run < 0 {
  5252  		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
  5253  		unlock(&sched.lock)
  5254  		throw("checkdead: inconsistent counts")
  5255  	}
  5256  
  5257  	grunning := 0
  5258  	forEachG(func(gp *g) {
  5259  		if isSystemGoroutine(gp, false) {
  5260  			return
  5261  		}
  5262  		s := readgstatus(gp)
  5263  		switch s &^ _Gscan {
  5264  		case _Gwaiting,
  5265  			_Gpreempted:
  5266  			grunning++
  5267  		case _Grunnable,
  5268  			_Grunning,
  5269  			_Gsyscall:
  5270  			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
  5271  			unlock(&sched.lock)
  5272  			throw("checkdead: runnable g")
  5273  		}
  5274  	})
  5275  	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
  5276  		unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
  5277  		fatal("no goroutines (main called runtime.Goexit) - deadlock!")
  5278  	}
  5279  
  5280  	// Maybe jump time forward for playground.
  5281  	if faketime != 0 {
  5282  		if when := timeSleepUntil(); when < maxWhen {
  5283  			faketime = when
  5284  
  5285  			// Start an M to steal the timer.
  5286  			pp, _ := pidleget(faketime)
  5287  			if pp == nil {
  5288  				// There should always be a free P since
  5289  				// nothing is running.
  5290  				unlock(&sched.lock)
  5291  				throw("checkdead: no p for timer")
  5292  			}
  5293  			mp := mget()
  5294  			if mp == nil {
  5295  				// There should always be a free M since
  5296  				// nothing is running.
  5297  				unlock(&sched.lock)
  5298  				throw("checkdead: no m for timer")
  5299  			}
  5300  			// M must be spinning to steal. We set spinning
  5301  			// explicitly, but since this is the only M it would
  5302  			// become spinning on its own anyway.
  5303  			sched.nmspinning.Add(1)
  5304  			mp.spinning = true
  5305  			mp.nextp.set(pp)
  5306  			notewakeup(&mp.park)
  5307  			return
  5308  		}
  5309  	}
  5310  
  5311  	// There are no goroutines running, so we can look at the P's.
  5312  	for _, pp := range allp {
  5313  		if len(pp.timers) > 0 {
  5314  			return
  5315  		}
  5316  	}
  5317  
  5318  	unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
  5319  	fatal("all goroutines are asleep - deadlock!")
  5320  }
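
        // Illustrative sketch, not part of the runtime: the smallest program that trips
        // the fatal error above blocks its only user goroutine with nothing left to run:
        //
        //	package main
        //
        //	func main() {
        //		ch := make(chan int)
        //		<-ch // no sender can ever exist; the runtime reports
        //		     // "fatal error: all goroutines are asleep - deadlock!"
        //	}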
  5321  
  5322  // forcegcperiod is the maximum time in nanoseconds between garbage
  5323  // collections. If we go this long without a garbage collection, one
  5324  // is forced to run.
  5325  //
  5326  // This is a variable for testing purposes. It normally doesn't change.
  5327  var forcegcperiod int64 = 2 * 60 * 1e9
  5328  
  5329  // needSysmonWorkaround is true if the workaround for
  5330  // golang.org/issue/42515 is needed on NetBSD.
  5331  var needSysmonWorkaround bool = false
  5332  
  5333  // Always runs without a P, so write barriers are not allowed.
  5334  //
  5335  //go:nowritebarrierrec
  5336  func sysmon() {
  5337  	lock(&sched.lock)
  5338  	sched.nmsys++
  5339  	checkdead()
  5340  	unlock(&sched.lock)
  5341  
  5342  	lasttrace := int64(0)
  5343  	idle := 0 // how many cycles in succession we have not woken anybody up
  5344  	delay := uint32(0)
  5345  
  5346  	for {
  5347  		if idle == 0 { // start with 20us sleep...
  5348  			delay = 20
  5349  		} else if idle > 50 { // start doubling the sleep after 1ms...
  5350  			delay *= 2
  5351  		}
  5352  		if delay > 10*1000 { // up to 10ms
  5353  			delay = 10 * 1000
  5354  		}
  5355  		usleep(delay)
  5356  
  5357  		// sysmon should not enter deep sleep if schedtrace is enabled so that
  5358  		// it can print that information at the right time.
  5359  		//
  5360  		// It should also not enter deep sleep if there are any active P's so
  5361  		// that it can retake P's from syscalls, preempt long running G's, and
  5362  		// poll the network if all P's are busy for long stretches.
  5363  		//
  5364  		// It should wakeup from deep sleep if any P's become active either due
  5365  		// to exiting a syscall or waking up due to a timer expiring so that it
  5366  		// can resume performing those duties. If it wakes from a syscall it
  5367  		// resets idle and delay as a bet that since it had retaken a P from a
  5368  		// syscall before, it may need to do it again shortly after the
  5369  		// application starts work again. It does not reset idle when waking
  5370  		// from a timer to avoid adding system load to applications that spend
  5371  		// most of their time sleeping.
  5372  		now := nanotime()
  5373  		if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
  5374  			lock(&sched.lock)
  5375  			if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
  5376  				syscallWake := false
  5377  				next := timeSleepUntil()
  5378  				if next > now {
  5379  					sched.sysmonwait.Store(true)
  5380  					unlock(&sched.lock)
  5381  					// Make wake-up period small enough
  5382  					// for the sampling to be correct.
  5383  					sleep := forcegcperiod / 2
  5384  					if next-now < sleep {
  5385  						sleep = next - now
  5386  					}
  5387  					shouldRelax := sleep >= osRelaxMinNS
  5388  					if shouldRelax {
  5389  						osRelax(true)
  5390  					}
  5391  					syscallWake = notetsleep(&sched.sysmonnote, sleep)
  5392  					if shouldRelax {
  5393  						osRelax(false)
  5394  					}
  5395  					lock(&sched.lock)
  5396  					sched.sysmonwait.Store(false)
  5397  					noteclear(&sched.sysmonnote)
  5398  				}
  5399  				if syscallWake {
  5400  					idle = 0
  5401  					delay = 20
  5402  				}
  5403  			}
  5404  			unlock(&sched.lock)
  5405  		}
  5406  
  5407  		lock(&sched.sysmonlock)
  5408  		// Update now in case we blocked on sysmonnote or spent a long time
  5409  		// blocked on schedlock or sysmonlock above.
  5410  		now = nanotime()
  5411  
  5412  		// trigger libc interceptors if needed
  5413  		if *cgo_yield != nil {
  5414  			asmcgocall(*cgo_yield, nil)
  5415  		}
  5416  		// poll network if not polled for more than 10ms
  5417  		lastpoll := sched.lastpoll.Load()
  5418  		if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
  5419  			sched.lastpoll.CompareAndSwap(lastpoll, now)
  5420  			list := netpoll(0) // non-blocking - returns list of goroutines
  5421  			if !list.empty() {
  5422  				// Need to decrement number of idle locked M's
  5423  				// (pretending that one more is running) before injectglist.
  5424  				// Otherwise it can lead to the following situation:
  5425  				// injectglist grabs all P's but before it starts M's to run the P's,
  5426  				// another M returns from syscall, finishes running its G,
  5427  				// observes that there is no work to do and no other running M's
  5428  				// and reports deadlock.
  5429  				incidlelocked(-1)
  5430  				injectglist(&list)
  5431  				incidlelocked(1)
  5432  			}
  5433  		}
  5434  		if GOOS == "netbsd" && needSysmonWorkaround {
  5435  			// netpoll is responsible for waiting for timer
  5436  			// expiration, so we typically don't have to worry
  5437  			// about starting an M to service timers. (Note that
  5438  			// the sleep for timeSleepUntil above simply ensures sysmon
  5439  			// starts running again when that timer expiration may
  5440  			// cause Go code to run again).
  5441  			//
  5442  			// However, netbsd has a kernel bug that sometimes
  5443  			// misses netpollBreak wake-ups, which can lead to
  5444  			// unbounded delays servicing timers. If we detect this
  5445  			// overrun, then startm to get something to handle the
  5446  			// timer.
  5447  			//
  5448  			// See issue 42515 and
  5449  			// https://gnats.netbsd.org/cgi-bin/query-pr-single.pl?number=50094.
  5450  			if next := timeSleepUntil(); next < now {
  5451  				startm(nil, false, false)
  5452  			}
  5453  		}
  5454  		if scavenger.sysmonWake.Load() != 0 {
  5455  			// Kick the scavenger awake if someone requested it.
  5456  			scavenger.wake()
  5457  		}
  5458  		// retake P's blocked in syscalls
  5459  		// and preempt long running G's
  5460  		if retake(now) != 0 {
  5461  			idle = 0
  5462  		} else {
  5463  			idle++
  5464  		}
  5465  		// check if we need to force a GC
  5466  		if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
  5467  			lock(&forcegc.lock)
  5468  			forcegc.idle.Store(false)
  5469  			var list gList
  5470  			list.push(forcegc.g)
  5471  			injectglist(&list)
  5472  			unlock(&forcegc.lock)
  5473  		}
  5474  		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
  5475  			lasttrace = now
  5476  			schedtrace(debug.scheddetail > 0)
  5477  		}
  5478  		unlock(&sched.sysmonlock)
  5479  	}
  5480  }
  5481  
  5482  type sysmontick struct {
  5483  	schedtick   uint32
  5484  	schedwhen   int64
  5485  	syscalltick uint32
  5486  	syscallwhen int64
  5487  }
  5488  
  5489  // forcePreemptNS is the time slice given to a G before it is
  5490  // preempted.
  5491  const forcePreemptNS = 10 * 1000 * 1000 // 10ms
  5492  
  5493  func retake(now int64) uint32 {
  5494  	n := 0
  5495  	// Prevent allp slice changes. This lock will be completely
  5496  	// uncontended unless we're already stopping the world.
  5497  	lock(&allpLock)
  5498  	// We can't use a range loop over allp because we may
  5499  	// temporarily drop the allpLock. Hence, we need to re-fetch
  5500  	// allp each time around the loop.
  5501  	for i := 0; i < len(allp); i++ {
  5502  		pp := allp[i]
  5503  		if pp == nil {
  5504  			// This can happen if procresize has grown
  5505  			// allp but not yet created new Ps.
  5506  			continue
  5507  		}
  5508  		pd := &pp.sysmontick
  5509  		s := pp.status
  5510  		sysretake := false
  5511  		if s == _Prunning || s == _Psyscall {
  5512  			// Preempt G if it's running for too long.
  5513  			t := int64(pp.schedtick)
  5514  			if int64(pd.schedtick) != t {
  5515  				pd.schedtick = uint32(t)
  5516  				pd.schedwhen = now
  5517  			} else if pd.schedwhen+forcePreemptNS <= now {
  5518  				preemptone(pp)
  5519  				// In case of syscall, preemptone() doesn't
  5520  				// work, because there is no M wired to P.
  5521  				sysretake = true
  5522  			}
  5523  		}
  5524  		if s == _Psyscall {
  5525  			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  5526  			t := int64(pp.syscalltick)
  5527  			if !sysretake && int64(pd.syscalltick) != t {
  5528  				pd.syscalltick = uint32(t)
  5529  				pd.syscallwhen = now
  5530  				continue
  5531  			}
  5532  			// On the one hand we don't want to retake Ps if there is no other work to do,
  5533  			// but on the other hand we want to retake them eventually
  5534  			// because they can prevent the sysmon thread from deep sleep.
  5535  			if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
  5536  				continue
  5537  			}
  5538  			// Drop allpLock so we can take sched.lock.
  5539  			unlock(&allpLock)
  5540  			// Need to decrement number of idle locked M's
  5541  			// (pretending that one more is running) before the CAS.
  5542  			// Otherwise the M from which we retake can exit the syscall,
  5543  			// increment nmidle and report deadlock.
  5544  			incidlelocked(-1)
  5545  			if atomic.Cas(&pp.status, s, _Pidle) {
  5546  				if traceEnabled() {
  5547  					traceGoSysBlock(pp)
  5548  					traceProcStop(pp)
  5549  				}
  5550  				n++
  5551  				pp.syscalltick++
  5552  				handoffp(pp)
  5553  			}
  5554  			incidlelocked(1)
  5555  			lock(&allpLock)
  5556  		}
  5557  	}
  5558  	unlock(&allpLock)
  5559  	return uint32(n)
  5560  }
  5561  
  5562  // Tell all goroutines that they have been preempted and they should stop.
  5563  // This function is purely best-effort. It can fail to inform a goroutine if a
  5564  // processor just started running it.
  5565  // No locks need to be held.
  5566  // Returns true if preemption request was issued to at least one goroutine.
  5567  func preemptall() bool {
  5568  	res := false
  5569  	for _, pp := range allp {
  5570  		if pp.status != _Prunning {
  5571  			continue
  5572  		}
  5573  		if preemptone(pp) {
  5574  			res = true
  5575  		}
  5576  	}
  5577  	return res
  5578  }
  5579  
  5580  // Tell the goroutine running on processor P to stop.
  5581  // This function is purely best-effort. It can incorrectly fail to inform the
  5582  // goroutine. It can inform the wrong goroutine. Even if it informs the
  5583  // correct goroutine, that goroutine might ignore the request if it is
  5584  // simultaneously executing newstack.
  5585  // No lock needs to be held.
  5586  // Returns true if preemption request was issued.
  5587  // The actual preemption will happen at some point in the future
  5588  // and will be indicated by the gp->status no longer being
  5589  // Grunning.
  5590  func preemptone(pp *p) bool {
  5591  	mp := pp.m.ptr()
  5592  	if mp == nil || mp == getg().m {
  5593  		return false
  5594  	}
  5595  	gp := mp.curg
  5596  	if gp == nil || gp == mp.g0 {
  5597  		return false
  5598  	}
  5599  
  5600  	gp.preempt = true
  5601  
  5602  	// Every call in a goroutine checks for stack overflow by
  5603  	// comparing the current stack pointer to gp->stackguard0.
  5604  	// Setting gp->stackguard0 to StackPreempt folds
  5605  	// preemption into the normal stack overflow check.
  5606  	gp.stackguard0 = stackPreempt
  5607  
  5608  	// Request an async preemption of this P.
  5609  	if preemptMSupported && debug.asyncpreemptoff == 0 {
  5610  		pp.preempt = true
  5611  		preemptM(mp)
  5612  	}
  5613  
  5614  	return true
  5615  }
  5616  
  5617  var starttime int64
  5618  
  5619  func schedtrace(detailed bool) {
  5620  	now := nanotime()
  5621  	if starttime == 0 {
  5622  		starttime = now
  5623  	}
  5624  
  5625  	lock(&sched.lock)
  5626  	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
  5627  	if detailed {
  5628  		print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
  5629  	}
  5630  	// We must be careful while reading data from P's, M's and G's.
  5631  	// Even if we hold schedlock, most data can be changed concurrently.
  5632  	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  5633  	for i, pp := range allp {
  5634  		mp := pp.m.ptr()
  5635  		h := atomic.Load(&pp.runqhead)
  5636  		t := atomic.Load(&pp.runqtail)
  5637  		if detailed {
  5638  			print("  P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
  5639  			if mp != nil {
  5640  				print(mp.id)
  5641  			} else {
  5642  				print("nil")
  5643  			}
  5644  			print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n")
  5645  		} else {
  5646  			// In non-detailed mode, format lengths of per-P run queues as:
  5647  			// [len1 len2 len3 len4]
  5648  			print(" ")
  5649  			if i == 0 {
  5650  				print("[")
  5651  			}
  5652  			print(t - h)
  5653  			if i == len(allp)-1 {
  5654  				print("]\n")
  5655  			}
  5656  		}
  5657  	}
  5658  
  5659  	if !detailed {
  5660  		unlock(&sched.lock)
  5661  		return
  5662  	}
  5663  
  5664  	for mp := allm; mp != nil; mp = mp.alllink {
  5665  		pp := mp.p.ptr()
  5666  		print("  M", mp.id, ": p=")
  5667  		if pp != nil {
  5668  			print(pp.id)
  5669  		} else {
  5670  			print("nil")
  5671  		}
  5672  		print(" curg=")
  5673  		if mp.curg != nil {
  5674  			print(mp.curg.goid)
  5675  		} else {
  5676  			print("nil")
  5677  		}
  5678  		print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
  5679  		if lockedg := mp.lockedg.ptr(); lockedg != nil {
  5680  			print(lockedg.goid)
  5681  		} else {
  5682  			print("nil")
  5683  		}
  5684  		print("\n")
  5685  	}
  5686  
  5687  	forEachG(func(gp *g) {
  5688  		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
  5689  		if gp.m != nil {
  5690  			print(gp.m.id)
  5691  		} else {
  5692  			print("nil")
  5693  		}
  5694  		print(" lockedm=")
  5695  		if lockedm := gp.lockedm.ptr(); lockedm != nil {
  5696  			print(lockedm.id)
  5697  		} else {
  5698  			print("nil")
  5699  		}
  5700  		print("\n")
  5701  	})
  5702  	unlock(&sched.lock)
  5703  }
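
        // Illustrative sketch, not part of the runtime: schedtrace has no API; it is
        // enabled with the GODEBUG environment variable, for example:
        //
        //	GODEBUG=schedtrace=1000 ./prog               // one summary line per second
        //	GODEBUG=schedtrace=1000,scheddetail=1 ./prog // plus per-P, per-M, per-G detail
        //
        // producing lines in the format printed above, e.g. (values made up):
        //
        //	SCHED 2000ms: gomaxprocs=8 idleprocs=7 threads=5 spinningthreads=0 needspinning=0 idlethreads=3 runqueue=0 [0 1 0 0 0 0 0 0]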
  5704  
  5705  // schedEnableUser enables or disables the scheduling of user
  5706  // goroutines.
  5707  //
  5708  // This does not stop already running user goroutines, so the caller
  5709  // should first stop the world when disabling user goroutines.
  5710  func schedEnableUser(enable bool) {
  5711  	lock(&sched.lock)
  5712  	if sched.disable.user == !enable {
  5713  		unlock(&sched.lock)
  5714  		return
  5715  	}
  5716  	sched.disable.user = !enable
  5717  	if enable {
  5718  		n := sched.disable.n
  5719  		sched.disable.n = 0
  5720  		globrunqputbatch(&sched.disable.runnable, n)
  5721  		unlock(&sched.lock)
  5722  		for ; n != 0 && sched.npidle.Load() != 0; n-- {
  5723  			startm(nil, false, false)
  5724  		}
  5725  	} else {
  5726  		unlock(&sched.lock)
  5727  	}
  5728  }
  5729  
  5730  // schedEnabled reports whether gp should be scheduled. It returns
  5731  // false if scheduling of gp is disabled.
  5732  //
  5733  // sched.lock must be held.
  5734  func schedEnabled(gp *g) bool {
  5735  	assertLockHeld(&sched.lock)
  5736  
  5737  	if sched.disable.user {
  5738  		return isSystemGoroutine(gp, true)
  5739  	}
  5740  	return true
  5741  }
  5742  
  5743  // Put mp on midle list.
  5744  // sched.lock must be held.
  5745  // May run during STW, so write barriers are not allowed.
  5746  //
  5747  //go:nowritebarrierrec
  5748  func mput(mp *m) {
  5749  	assertLockHeld(&sched.lock)
  5750  
  5751  	mp.schedlink = sched.midle
  5752  	sched.midle.set(mp)
  5753  	sched.nmidle++
  5754  	checkdead()
  5755  }
  5756  
  5757  // Try to get an m from midle list.
  5758  // sched.lock must be held.
  5759  // May run during STW, so write barriers are not allowed.
  5760  //
  5761  //go:nowritebarrierrec
  5762  func mget() *m {
  5763  	assertLockHeld(&sched.lock)
  5764  
  5765  	mp := sched.midle.ptr()
  5766  	if mp != nil {
  5767  		sched.midle = mp.schedlink
  5768  		sched.nmidle--
  5769  	}
  5770  	return mp
  5771  }
  5772  
  5773  // Put gp on the global runnable queue.
  5774  // sched.lock must be held.
  5775  // May run during STW, so write barriers are not allowed.
  5776  //
  5777  //go:nowritebarrierrec
  5778  func globrunqput(gp *g) {
  5779  	assertLockHeld(&sched.lock)
  5780  
  5781  	sched.runq.pushBack(gp)
  5782  	sched.runqsize++
  5783  }
  5784  
  5785  // Put gp at the head of the global runnable queue.
  5786  // sched.lock must be held.
  5787  // May run during STW, so write barriers are not allowed.
  5788  //
  5789  //go:nowritebarrierrec
  5790  func globrunqputhead(gp *g) {
  5791  	assertLockHeld(&sched.lock)
  5792  
  5793  	sched.runq.push(gp)
  5794  	sched.runqsize++
  5795  }
  5796  
  5797  // Put a batch of runnable goroutines on the global runnable queue.
  5798  // This clears *batch.
  5799  // sched.lock must be held.
  5800  // May run during STW, so write barriers are not allowed.
  5801  //
  5802  //go:nowritebarrierrec
  5803  func globrunqputbatch(batch *gQueue, n int32) {
  5804  	assertLockHeld(&sched.lock)
  5805  
  5806  	sched.runq.pushBackAll(*batch)
  5807  	sched.runqsize += n
  5808  	*batch = gQueue{}
  5809  }
  5810  
  5811  // Try to get a batch of G's from the global runnable queue.
  5812  // sched.lock must be held.
  5813  func globrunqget(pp *p, max int32) *g {
  5814  	assertLockHeld(&sched.lock)
  5815  
  5816  	if sched.runqsize == 0 {
  5817  		return nil
  5818  	}
  5819  
  5820  	n := sched.runqsize/gomaxprocs + 1
  5821  	if n > sched.runqsize {
  5822  		n = sched.runqsize
  5823  	}
  5824  	if max > 0 && n > max {
  5825  		n = max
  5826  	}
  5827  	if n > int32(len(pp.runq))/2 {
  5828  		n = int32(len(pp.runq)) / 2
  5829  	}
  5830  
  5831  	sched.runqsize -= n
  5832  
  5833  	gp := sched.runq.pop()
  5834  	n--
  5835  	for ; n > 0; n-- {
  5836  		gp1 := sched.runq.pop()
  5837  		runqput(pp, gp1, false)
  5838  	}
  5839  	return gp
  5840  }
  5841  
  5842  // pMask is an atomic bitstring with one bit per P.
  5843  type pMask []uint32
  5844  
  5845  // read returns true if P id's bit is set.
  5846  func (p pMask) read(id uint32) bool {
  5847  	word := id / 32
  5848  	mask := uint32(1) << (id % 32)
  5849  	return (atomic.Load(&p[word]) & mask) != 0
  5850  }
  5851  
  5852  // set sets P id's bit.
  5853  func (p pMask) set(id int32) {
  5854  	word := id / 32
  5855  	mask := uint32(1) << (id % 32)
  5856  	atomic.Or(&p[word], mask)
  5857  }
  5858  
  5859  // clear clears P id's bit.
  5860  func (p pMask) clear(id int32) {
  5861  	word := id / 32
  5862  	mask := uint32(1) << (id % 32)
  5863  	atomic.And(&p[word], ^mask)
  5864  }
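
        // Illustrative sketch, not part of the runtime: the word/mask arithmetic above,
        // spelled out for P id 37:
        //
        //	id := int32(37)
        //	word := id / 32                // 1: the bit lives in the second uint32
        //	mask := uint32(1) << (id % 32) // 1 << 5 == 0x20
        //	// set:   atomic.Or(&p[word], mask)   turns the bit on
        //	// clear: atomic.And(&p[word], ^mask) turns it off
        //	// read:  atomic.Load(&p[word])&mask != 0 reports whether it is on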
  5865  
  5866  // updateTimerPMask clears pp's timer mask if it has no timers on its heap.
  5867  //
  5868  // Ideally, the timer mask would be kept immediately consistent on any timer
  5869  // operations. Unfortunately, updating a shared global data structure in the
  5870  // timer hot path adds too much overhead in applications frequently switching
  5871  // between no timers and some timers.
  5872  //
  5873  // As a compromise, the timer mask is updated only on pidleget / pidleput. A
  5874  // running P (returned by pidleget) may add a timer at any time, so its mask
  5875  // must be set. An idle P (passed to pidleput) cannot add new timers while
  5876  // idle, so if it has no timers at that time, its mask may be cleared.
  5877  //
  5878  // Thus, we get the following effects on timer-stealing in findrunnable:
  5879  //
  5880  //   - Idle Ps with no timers when they go idle are never checked in findrunnable
  5881  //     (for work- or timer-stealing; this is the ideal case).
  5882  //   - Running Ps must always be checked.
  5883  //   - Idle Ps whose timers are stolen must continue to be checked until they run
  5884  //     again, even after timer expiration.
  5885  //
  5886  // When the P starts running again, the mask should be set, as a timer may be
  5887  // added at any time.
  5888  //
  5889  // TODO(prattmic): Additional targeted updates may improve the above cases.
  5890  // e.g., updating the mask when stealing a timer.
  5891  func updateTimerPMask(pp *p) {
  5892  	if pp.numTimers.Load() > 0 {
  5893  		return
  5894  	}
  5895  
  5896  	// Looks like there are no timers, however another P may transiently
  5897  	// decrement numTimers when handling a timerModified timer in
  5898  	// checkTimers. We must take timersLock to serialize with these changes.
  5899  	lock(&pp.timersLock)
  5900  	if pp.numTimers.Load() == 0 {
  5901  		timerpMask.clear(pp.id)
  5902  	}
  5903  	unlock(&pp.timersLock)
  5904  }
  5905  
  5906  // pidleput puts p on the _Pidle list. now must be a relatively recent call
  5907  // to nanotime or zero. Returns now or the current time if now was zero.
  5908  //
  5909  // This releases ownership of p. Once sched.lock is released it is no longer
  5910  // safe to use p.
  5911  //
  5912  // sched.lock must be held.
  5913  //
  5914  // May run during STW, so write barriers are not allowed.
  5915  //
  5916  //go:nowritebarrierrec
  5917  func pidleput(pp *p, now int64) int64 {
  5918  	assertLockHeld(&sched.lock)
  5919  
  5920  	if !runqempty(pp) {
  5921  		throw("pidleput: P has non-empty run queue")
  5922  	}
  5923  	if now == 0 {
  5924  		now = nanotime()
  5925  	}
  5926  	updateTimerPMask(pp) // clear if there are no timers.
  5927  	idlepMask.set(pp.id)
  5928  	pp.link = sched.pidle
  5929  	sched.pidle.set(pp)
  5930  	sched.npidle.Add(1)
  5931  	if !pp.limiterEvent.start(limiterEventIdle, now) {
  5932  		throw("must be able to track idle limiter event")
  5933  	}
  5934  	return now
  5935  }
  5936  
  5937  // pidleget tries to get a p from the _Pidle list, acquiring ownership.
  5938  //
  5939  // sched.lock must be held.
  5940  //
  5941  // May run during STW, so write barriers are not allowed.
  5942  //
  5943  //go:nowritebarrierrec
  5944  func pidleget(now int64) (*p, int64) {
  5945  	assertLockHeld(&sched.lock)
  5946  
  5947  	pp := sched.pidle.ptr()
  5948  	if pp != nil {
  5949  		// Timer may get added at any time now.
  5950  		if now == 0 {
  5951  			now = nanotime()
  5952  		}
  5953  		timerpMask.set(pp.id)
  5954  		idlepMask.clear(pp.id)
  5955  		sched.pidle = pp.link
  5956  		sched.npidle.Add(-1)
  5957  		pp.limiterEvent.stop(limiterEventIdle, now)
  5958  	}
  5959  	return pp, now
  5960  }
  5961  
  5962  // pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership.
  5963  // This is called by spinning Ms (or callers that need a spinning M) that have
  5964  // found work. If no P is available, this must synchronize with non-spinning
  5965  // Ms that may be preparing to drop their P without discovering this work.
  5966  //
  5967  // sched.lock must be held.
  5968  //
  5969  // May run during STW, so write barriers are not allowed.
  5970  //
  5971  //go:nowritebarrierrec
  5972  func pidlegetSpinning(now int64) (*p, int64) {
  5973  	assertLockHeld(&sched.lock)
  5974  
  5975  	pp, now := pidleget(now)
  5976  	if pp == nil {
  5977  		// See "Delicate dance" comment in findrunnable. We found work
  5978  		// that we cannot take, we must synchronize with non-spinning
  5979  		// Ms that may be preparing to drop their P.
  5980  		sched.needspinning.Store(1)
  5981  		return nil, now
  5982  	}
  5983  
  5984  	return pp, now
  5985  }
  5986  
  5987  // runqempty reports whether pp has no Gs on its local run queue.
  5988  // It never returns true spuriously.
  5989  func runqempty(pp *p) bool {
  5990  	// Defend against a race where 1) pp has G1 in runnext but runqhead == runqtail,
  5991  	// 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runnext.
  5992  	// Simply observing that runqhead == runqtail and then observing that runnext == nil
  5993  	// does not mean the queue is empty.
  5994  	for {
  5995  		head := atomic.Load(&pp.runqhead)
  5996  		tail := atomic.Load(&pp.runqtail)
  5997  		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
  5998  		if tail == atomic.Load(&pp.runqtail) {
  5999  			return head == tail && runnext == 0
  6000  		}
  6001  	}
  6002  }
  6003  
  6004  // To shake out latent assumptions about scheduling order,
  6005  // we introduce some randomness into scheduling decisions
  6006  // when running with the race detector.
  6007  // The need for this was made obvious by changing the
  6008  // (deterministic) scheduling order in Go 1.5 and breaking
  6009  // many poorly-written tests.
  6010  // With the randomness here, as long as the tests pass
  6011  // consistently with -race, they shouldn't have latent scheduling
  6012  // assumptions.
  6013  const randomizeScheduler = raceenabled
  6014  
  6015  // runqput tries to put g on the local runnable queue.
  6016  // If next is false, runqput adds g to the tail of the runnable queue.
  6017  // If next is true, runqput puts g in the pp.runnext slot.
  6018  // If the run queue is full, runqput puts g on the global queue.
  6019  // Executed only by the owner P.
  6020  func runqput(pp *p, gp *g, next bool) {
  6021  	if randomizeScheduler && next && fastrandn(2) == 0 {
  6022  		next = false
  6023  	}
  6024  
  6025  	if next {
  6026  	retryNext:
  6027  		oldnext := pp.runnext
  6028  		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
  6029  			goto retryNext
  6030  		}
  6031  		if oldnext == 0 {
  6032  			return
  6033  		}
  6034  		// Kick the old runnext out to the regular run queue.
  6035  		gp = oldnext.ptr()
  6036  	}
  6037  
  6038  retry:
  6039  	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
  6040  	t := pp.runqtail
  6041  	if t-h < uint32(len(pp.runq)) {
  6042  		pp.runq[t%uint32(len(pp.runq))].set(gp)
  6043  		atomic.StoreRel(&pp.runqtail, t+1) // store-release, makes the item available for consumption
  6044  		return
  6045  	}
  6046  	if runqputslow(pp, gp, h, t) {
  6047  		return
  6048  	}
  6049  	// the queue is not full, now the put above must succeed
  6050  	goto retry
  6051  }
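
        // Illustrative sketch, not part of the runtime: runqhead and runqtail are
        // free-running uint32 counters, so occupancy is t-h and slots are indexed modulo
        // len(pp.runq). Unsigned wraparound keeps the arithmetic correct even when the
        // counters overflow:
        //
        //	const qlen = 256        // len(pp.runq)
        //	h := uint32(0xFFFFFFFE) // head just below the uint32 maximum
        //	t := h + 3              // tail has wrapped past zero (t == 1)
        //	used := t - h           // 3: still the right occupancy after wraparound
        //	slot := t % qlen        // 1: the ring slot the next push would use
        //	room := t-h < qlen      // true: the check runqput performs before storing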
  6052  
  6053  // Put g and a batch of work from local runnable queue on global queue.
  6054  // Executed only by the owner P.
  6055  func runqputslow(pp *p, gp *g, h, t uint32) bool {
  6056  	var batch [len(pp.runq)/2 + 1]*g
  6057  
  6058  	// First, grab a batch from local queue.
  6059  	n := t - h
  6060  	n = n / 2
  6061  	if n != uint32(len(pp.runq)/2) {
  6062  		throw("runqputslow: queue is not full")
  6063  	}
  6064  	for i := uint32(0); i < n; i++ {
  6065  		batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
  6066  	}
  6067  	if !atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
  6068  		return false
  6069  	}
  6070  	batch[n] = gp
  6071  
  6072  	if randomizeScheduler {
  6073  		for i := uint32(1); i <= n; i++ {
  6074  			j := fastrandn(i + 1)
  6075  			batch[i], batch[j] = batch[j], batch[i]
  6076  		}
  6077  	}
  6078  
  6079  	// Link the goroutines.
  6080  	for i := uint32(0); i < n; i++ {
  6081  		batch[i].schedlink.set(batch[i+1])
  6082  	}
  6083  	var q gQueue
  6084  	q.head.set(batch[0])
  6085  	q.tail.set(batch[n])
  6086  
  6087  	// Now put the batch on global queue.
  6088  	lock(&sched.lock)
  6089  	globrunqputbatch(&q, int32(n+1))
  6090  	unlock(&sched.lock)
  6091  	return true
  6092  }
  6093  
  6094  // runqputbatch tries to put all the G's on q on the local runnable queue.
  6095  // If the queue is full, they are put on the global queue; in that case
  6096  // this will temporarily acquire the scheduler lock.
  6097  // Executed only by the owner P.
  6098  func runqputbatch(pp *p, q *gQueue, qsize int) {
  6099  	h := atomic.LoadAcq(&pp.runqhead)
  6100  	t := pp.runqtail
  6101  	n := uint32(0)
  6102  	for !q.empty() && t-h < uint32(len(pp.runq)) {
  6103  		gp := q.pop()
  6104  		pp.runq[t%uint32(len(pp.runq))].set(gp)
  6105  		t++
  6106  		n++
  6107  	}
  6108  	qsize -= int(n)
  6109  
  6110  	if randomizeScheduler {
  6111  		off := func(o uint32) uint32 {
  6112  			return (pp.runqtail + o) % uint32(len(pp.runq))
  6113  		}
  6114  		for i := uint32(1); i < n; i++ {
  6115  			j := fastrandn(i + 1)
  6116  			pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
  6117  		}
  6118  	}
  6119  
  6120  	atomic.StoreRel(&pp.runqtail, t)
  6121  	if !q.empty() {
  6122  		lock(&sched.lock)
  6123  		globrunqputbatch(q, int32(qsize))
  6124  		unlock(&sched.lock)
  6125  	}
  6126  }
  6127  
  6128  // Get g from local runnable queue.
  6129  // If inheritTime is true, gp should inherit the remaining time in the
  6130  // current time slice. Otherwise, it should start a new time slice.
  6131  // Executed only by the owner P.
  6132  func runqget(pp *p) (gp *g, inheritTime bool) {
  6133  	// If there's a runnext, it's the next G to run.
  6134  	next := pp.runnext
  6135  	// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P,
  6136  	// because other Ps can race to set runnext to 0, but only the current P can set it to non-0.
  6137  	// Hence, there's no need to retry this CAS if it fails.
  6138  	if next != 0 && pp.runnext.cas(next, 0) {
  6139  		return next.ptr(), true
  6140  	}
  6141  
  6142  	for {
  6143  		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
  6144  		t := pp.runqtail
  6145  		if t == h {
  6146  			return nil, false
  6147  		}
  6148  		gp := pp.runq[h%uint32(len(pp.runq))].ptr()
  6149  		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume
  6150  			return gp, false
  6151  		}
  6152  	}
  6153  }
  6154  
  6155  // runqdrain drains the local runnable queue of pp and returns all goroutines in it.
  6156  // Executed only by the owner P.
  6157  func runqdrain(pp *p) (drainQ gQueue, n uint32) {
  6158  	oldNext := pp.runnext
  6159  	if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
  6160  		drainQ.pushBack(oldNext.ptr())
  6161  		n++
  6162  	}
  6163  
  6164  retry:
  6165  	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
  6166  	t := pp.runqtail
  6167  	qn := t - h
  6168  	if qn == 0 {
  6169  		return
  6170  	}
  6171  	if qn > uint32(len(pp.runq)) { // read inconsistent h and t
  6172  		goto retry
  6173  	}
  6174  
  6175  	if !atomic.CasRel(&pp.runqhead, h, h+qn) { // cas-release, commits consume
  6176  		goto retry
  6177  	}
  6178  
  6179  	// We advance the head pointer before copying the G's into drainQ, the reverse
  6180  	// of the usual consume order, so that we don't corrupt the statuses of G's
  6181  	// while runqdrain() and runqsteal() run in parallel.
  6182  	// Claiming the slots first gives us full ownership of those G's, so it is
  6183  	// only then safe to update gp.schedlink; until the head moves, other P's can
  6184  	// still see the G's in the local run queue and steal them.
  6185  	// See https://groups.google.com/g/golang-dev/c/0pTKxEKhHSc/m/6Q85QjdVBQAJ for more details.
  6186  	for i := uint32(0); i < qn; i++ {
  6187  		gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
  6188  		drainQ.pushBack(gp)
  6189  		n++
  6190  	}
  6191  	return
  6192  }
  6193  
  6194  // Grabs a batch of goroutines from pp's runnable queue into batch.
  6195  // Batch is a ring buffer starting at batchHead.
  6196  // Returns number of grabbed goroutines.
  6197  // Can be executed by any P.
  6198  func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
  6199  	for {
  6200  		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers
  6201  		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer
  6202  		n := t - h
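  		// Take half of the victim's queue, rounded up: n - n/2 == ceil(n/2).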
  6203  		n = n - n/2
  6204  		if n == 0 {
  6205  			if stealRunNextG {
  6206  				// Try to steal from pp.runnext.
  6207  				if next := pp.runnext; next != 0 {
  6208  					if pp.status == _Prunning {
  6209  						// Sleep to ensure that pp isn't about to run the g
  6210  						// we are about to steal.
  6211  						// The important use case here is when the g running
  6212  						// on pp ready()s another g and then almost
  6213  						// immediately blocks. Instead of stealing runnext
  6214  						// in this window, back off to give pp a chance to
  6215  						// schedule runnext. This will avoid thrashing gs
  6216  						// between different Ps.
  6217  						// A sync chan send/recv takes ~50ns as of time of
  6218  						// writing, so 3us gives ~50x overshoot.
  6219  						if GOOS != "windows" && GOOS != "openbsd" && GOOS != "netbsd" {
  6220  							usleep(3)
  6221  						} else {
  6222  							// On some platforms system timer granularity is
  6223  							// 1-15ms, which is way too much for this
  6224  							// optimization. So just yield.
  6225  							osyield()
  6226  						}
  6227  					}
  6228  					if !pp.runnext.cas(next, 0) {
  6229  						continue
  6230  					}
  6231  					batch[batchHead%uint32(len(batch))] = next
  6232  					return 1
  6233  				}
  6234  			}
  6235  			return 0
  6236  		}
  6237  		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t
  6238  			continue
  6239  		}
  6240  		for i := uint32(0); i < n; i++ {
  6241  			g := pp.runq[(h+i)%uint32(len(pp.runq))]
  6242  			batch[(batchHead+i)%uint32(len(batch))] = g
  6243  		}
  6244  		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume
  6245  			return n
  6246  		}
  6247  	}
  6248  }
  6249  
  6250  // Steal half of the elements from the local runnable queue of p2
  6251  // and put them onto the local runnable queue of pp.
  6252  // Returns one of the stolen elements (or nil if the steal failed).
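        // For example, stealing from a victim queue holding 7 G's grabs
        // ceil(7/2) = 4 of them: one is returned directly and the remaining 3
        // are appended to pp's local runnable queue.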
  6253  func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
  6254  	t := pp.runqtail
  6255  	n := runqgrab(p2, &pp.runq, t, stealRunNextG)
  6256  	if n == 0 {
  6257  		return nil
  6258  	}
  6259  	n--
  6260  	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
  6261  	if n == 0 {
  6262  		return gp
  6263  	}
  6264  	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers
  6265  	if t-h+n >= uint32(len(pp.runq)) {
  6266  		throw("runqsteal: runq overflow")
  6267  	}
  6268  	atomic.StoreRel(&pp.runqtail, t+n) // store-release, makes the item available for consumption
  6269  	return gp
  6270  }
  6271  
  6272  // A gQueue is a deque of Gs linked through g.schedlink. A G can only
  6273  // be on one gQueue or gList at a time.
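        // push and pop both operate on the head, while pushBack appends to the tail,
        // so pushBack paired with pop gives FIFO order: after pushBack(a), pushBack(b),
        // pushBack(c), successive pops return a, b, then c.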
  6274  type gQueue struct {
  6275  	head guintptr
  6276  	tail guintptr
  6277  }
  6278  
  6279  // empty reports whether q is empty.
  6280  func (q *gQueue) empty() bool {
  6281  	return q.head == 0
  6282  }
  6283  
  6284  // push adds gp to the head of q.
  6285  func (q *gQueue) push(gp *g) {
  6286  	gp.schedlink = q.head
  6287  	q.head.set(gp)
  6288  	if q.tail == 0 {
  6289  		q.tail.set(gp)
  6290  	}
  6291  }
  6292  
  6293  // pushBack adds gp to the tail of q.
  6294  func (q *gQueue) pushBack(gp *g) {
  6295  	gp.schedlink = 0
  6296  	if q.tail != 0 {
  6297  		q.tail.ptr().schedlink.set(gp)
  6298  	} else {
  6299  		q.head.set(gp)
  6300  	}
  6301  	q.tail.set(gp)
  6302  }
  6303  
  6304  // pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
  6305  // not be used.
  6306  func (q *gQueue) pushBackAll(q2 gQueue) {
  6307  	if q2.tail == 0 {
  6308  		return
  6309  	}
  6310  	q2.tail.ptr().schedlink = 0
  6311  	if q.tail != 0 {
  6312  		q.tail.ptr().schedlink = q2.head
  6313  	} else {
  6314  		q.head = q2.head
  6315  	}
  6316  	q.tail = q2.tail
  6317  }
  6318  
  6319  // pop removes and returns the head of queue q. It returns nil if
  6320  // q is empty.
  6321  func (q *gQueue) pop() *g {
  6322  	gp := q.head.ptr()
  6323  	if gp != nil {
  6324  		q.head = gp.schedlink
  6325  		if q.head == 0 {
  6326  			q.tail = 0
  6327  		}
  6328  	}
  6329  	return gp
  6330  }
  6331  
  6332  // popList takes all Gs in q and returns them as a gList.
  6333  func (q *gQueue) popList() gList {
  6334  	stack := gList{q.head}
  6335  	*q = gQueue{}
  6336  	return stack
  6337  }
  6338  
  6339  // A gList is a list of Gs linked through g.schedlink. A G can only be
  6340  // on one gQueue or gList at a time.
  6341  type gList struct {
  6342  	head guintptr
  6343  }
  6344  
  6345  // empty reports whether l is empty.
  6346  func (l *gList) empty() bool {
  6347  	return l.head == 0
  6348  }
  6349  
  6350  // push adds gp to the head of l.
  6351  func (l *gList) push(gp *g) {
  6352  	gp.schedlink = l.head
  6353  	l.head.set(gp)
  6354  }
  6355  
  6356  // pushAll prepends all Gs in q to l.
  6357  func (l *gList) pushAll(q gQueue) {
  6358  	if !q.empty() {
  6359  		q.tail.ptr().schedlink = l.head
  6360  		l.head = q.head
  6361  	}
  6362  }
  6363  
  6364  // pop removes and returns the head of l. If l is empty, it returns nil.
  6365  func (l *gList) pop() *g {
  6366  	gp := l.head.ptr()
  6367  	if gp != nil {
  6368  		l.head = gp.schedlink
  6369  	}
  6370  	return gp
  6371  }
  6372  
  6373  //go:linkname setMaxThreads runtime/debug.setMaxThreads
  6374  func setMaxThreads(in int) (out int) {
  6375  	lock(&sched.lock)
  6376  	out = int(sched.maxmcount)
  6377  	if in > 0x7fffffff { // MaxInt32
  6378  		sched.maxmcount = 0x7fffffff
  6379  	} else {
  6380  		sched.maxmcount = int32(in)
  6381  	}
  6382  	checkmcount()
  6383  	unlock(&sched.lock)
  6384  	return
  6385  }
  6386  
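        // procPin pins the calling goroutine to its current P by incrementing
        // m.locks, which disables preemption, and returns the P's id.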
  6387  //go:nosplit
  6388  func procPin() int {
  6389  	gp := getg()
  6390  	mp := gp.m
  6391  
  6392  	mp.locks++
  6393  	return int(mp.p.ptr().id)
  6394  }
  6395  
  6396  //go:nosplit
  6397  func procUnpin() {
  6398  	gp := getg()
  6399  	gp.m.locks--
  6400  }
  6401  
  6402  //go:linkname sync_runtime_procPin sync.runtime_procPin
  6403  //go:nosplit
  6404  func sync_runtime_procPin() int {
  6405  	return procPin()
  6406  }
  6407  
  6408  //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
  6409  //go:nosplit
  6410  func sync_runtime_procUnpin() {
  6411  	procUnpin()
  6412  }
  6413  
  6414  //go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
  6415  //go:nosplit
  6416  func sync_atomic_runtime_procPin() int {
  6417  	return procPin()
  6418  }
  6419  
  6420  //go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
  6421  //go:nosplit
  6422  func sync_atomic_runtime_procUnpin() {
  6423  	procUnpin()
  6424  }
  6425  
  6426  // Active spinning for sync.Mutex.
  6427  //
  6428  //go:linkname sync_runtime_canSpin sync.runtime_canSpin
  6429  //go:nosplit
  6430  func sync_runtime_canSpin(i int) bool {
  6431  	// sync.Mutex is cooperative, so we are conservative with spinning.
  6432  	// Spin only a few times and only if running on a multicore machine,
  6433  	// GOMAXPROCS>1, there is at least one other running P, and the local runq is empty.
  6434  	// As opposed to runtime mutexes we don't do passive spinning here,
  6435  	// because there can be work on global runq or on other Ps.
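        	// For example, with GOMAXPROCS=4, two idle Ps, and one spinning M,
        	// 4 <= 2+1+1 holds and we refuse to spin.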
  6436  	if i >= active_spin || ncpu <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
  6437  		return false
  6438  	}
  6439  	if p := getg().m.p.ptr(); !runqempty(p) {
  6440  		return false
  6441  	}
  6442  	return true
  6443  }
  6444  
  6445  //go:linkname sync_runtime_doSpin sync.runtime_doSpin
  6446  //go:nosplit
  6447  func sync_runtime_doSpin() {
  6448  	procyield(active_spin_cnt)
  6449  }
  6450  
  6451  var stealOrder randomOrder
  6452  
  6453  // randomOrder/randomEnum are helper types for randomized work stealing.
  6454  // They allow enumerating all Ps in different pseudo-random orders without repetitions.
  6455  // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
  6456  // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
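        // For example, with count = 6 the coprimes are {1, 5}; starting at pos = 2
        // with inc = 5 the enumeration visits positions 2, 1, 0, 5, 4, 3,
        // covering each P exactly once.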
  6457  type randomOrder struct {
  6458  	count    uint32
  6459  	coprimes []uint32
  6460  }
  6461  
  6462  type randomEnum struct {
  6463  	i     uint32
  6464  	count uint32
  6465  	pos   uint32
  6466  	inc   uint32
  6467  }
  6468  
  6469  func (ord *randomOrder) reset(count uint32) {
  6470  	ord.count = count
  6471  	ord.coprimes = ord.coprimes[:0]
  6472  	for i := uint32(1); i <= count; i++ {
  6473  		if gcd(i, count) == 1 {
  6474  			ord.coprimes = append(ord.coprimes, i)
  6475  		}
  6476  	}
  6477  }
  6478  
  6479  func (ord *randomOrder) start(i uint32) randomEnum {
  6480  	return randomEnum{
  6481  		count: ord.count,
  6482  		pos:   i % ord.count,
  6483  		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
  6484  	}
  6485  }
  6486  
  6487  func (enum *randomEnum) done() bool {
  6488  	return enum.i == enum.count
  6489  }
  6490  
  6491  func (enum *randomEnum) next() {
  6492  	enum.i++
  6493  	enum.pos = (enum.pos + enum.inc) % enum.count
  6494  }
  6495  
  6496  func (enum *randomEnum) position() uint32 {
  6497  	return enum.pos
  6498  }
  6499  
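        // gcd returns the greatest common divisor of a and b, computed with the
        // iterative Euclidean algorithm.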
  6500  func gcd(a, b uint32) uint32 {
  6501  	for b != 0 {
  6502  		a, b = b, a%b
  6503  	}
  6504  	return a
  6505  }
  6506  
  6507  // An initTask represents the set of initializations that need to be done for a package.
  6508  // Keep in sync with ../../test/noinit.go:initTask
  6509  type initTask struct {
  6510  	state uint32 // 0 = uninitialized, 1 = in progress, 2 = done
  6511  	nfns  uint32
  6512  	// followed by nfns pcs, uintptr sized, one per init function to run
  6513  }
  6514  
  6515  // inittrace stores statistics for init functions which are
  6516  // updated by malloc and newproc when active is true.
  6517  var inittrace tracestat
  6518  
  6519  type tracestat struct {
  6520  	active bool   // init tracing activation status
  6521  	id     uint64 // init goroutine id
  6522  	allocs uint64 // heap allocations
  6523  	bytes  uint64 // heap allocated bytes
  6524  }
  6525  
  6526  func doInit(ts []*initTask) {
  6527  	for _, t := range ts {
  6528  		doInit1(t)
  6529  	}
  6530  }
  6531  
  6532  func doInit1(t *initTask) {
  6533  	switch t.state {
  6534  	case 2: // fully initialized
  6535  		return
  6536  	case 1: // initialization in progress
  6537  		throw("recursive call during initialization - linker skew")
  6538  	default: // not initialized yet
  6539  		t.state = 1 // initialization in progress
  6540  
  6541  		var (
  6542  			start  int64
  6543  			before tracestat
  6544  		)
  6545  
  6546  		if inittrace.active {
  6547  			start = nanotime()
  6548  			// Load stats non-atomically since inittrace is updated only by this init goroutine.
  6549  			before = inittrace
  6550  		}
  6551  
  6552  		if t.nfns == 0 {
  6553  			// We should have pruned all of these in the linker.
  6554  			throw("inittask with no functions")
  6555  		}
  6556  
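        		// The init function pcs start 8 bytes into the task, immediately
        		// after the state and nfns fields.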
  6557  		firstFunc := add(unsafe.Pointer(t), 8)
  6558  		for i := uint32(0); i < t.nfns; i++ {
  6559  			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
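        			// p is the address of the i-th pc slot; reinterpreting &p as a
        			// *func() turns that slot into a funcval, so f() calls the init
        			// function at that pc.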
  6560  			f := *(*func())(unsafe.Pointer(&p))
  6561  			f()
  6562  		}
  6563  
  6564  		if inittrace.active {
  6565  			end := nanotime()
  6566  			// Load stats non-atomically since inittrace is updated only by this init goroutine.
  6567  			after := inittrace
  6568  
  6569  			f := *(*func())(unsafe.Pointer(&firstFunc))
  6570  			pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
  6571  
  6572  			var sbuf [24]byte
  6573  			print("init ", pkg, " @")
  6574  			print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
  6575  			print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
  6576  			print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
  6577  			print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
  6578  			print("\n")
  6579  		}
  6580  
  6581  		t.state = 2 // initialization done
  6582  	}
  6583  }