github.com/zxy12/golang151_with_comment@v0.0.0-20190507085033-721809559d3c/runtime/proc.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

//go:linkname runtime_init runtime.init
func runtime_init()

//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool
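
// Illustrative sketch (not part of the original source): the "make early,
// close when done" idiom that main_init_done relies on. A receive from a
// closed channel never blocks, so a waiter that starts at any point after
// the channel is created unblocks as soon as initialization has finished.
// The names below are hypothetical.
//
//	initDone := make(chan bool) // created before any waiter can exist
//	go func() {
//		<-initDone // blocks until close(initDone)
//		// ... initialization is complete, safe to proceed ...
//	}()
//	// ... run initialization ...
//	close(initDone) // releases every current and future waiter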

//go:linkname main_main main.main
func main_main()

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Record when the world started.
	runtimeInitTime = nanotime()

	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if _cgo_malloc == nil {
			throw("_cgo_malloc missing")
		}
		if _cgo_free == nil {
			throw("_cgo_free missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	main_init()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	for {
		var x *int32
		*x = 0
	}
}
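
// Illustrative sketch (not part of the original source): how a program can
// arrange for main.main to run on the main OS thread, as the comment above
// lockOSThread describes. Calling runtime.LockOSThread from an init
// function preserves the lock that the runtime holds during initialization.
//
//	package main
//
//	import "runtime"
//
//	func init() {
//		// The main goroutine is locked to the main OS thread while init
//		// functions run; locking here keeps it that way for main.main.
//		runtime.LockOSThread()
//	}
//
//	func main() {
//		// Calls that must be made from the main thread (some C libraries
//		// require this) can now be made directly from main.
//	}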

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomicstore(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		startGC(gcBackgroundMode, true)
	}
}
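
// Illustrative sketch (not part of the original source): a user-level
// analogue of the forcegchelper pattern, with a channel standing in for the
// runtime's park/resume machinery. A helper goroutine marks itself idle,
// blocks until a monitor explicitly wakes it, then does one round of work.
// All names are hypothetical; sync/atomic and time are assumed imports.
//
//	var (
//		helperIdle int32 // 1 while the helper is parked
//		wake       = make(chan struct{}, 1)
//	)
//
//	func helper() {
//		for {
//			atomic.StoreInt32(&helperIdle, 1)
//			<-wake // parked until the monitor resumes us
//			// ... perform the forced work ...
//		}
//	}
//
//	func monitor() {
//		for range time.Tick(2 * time.Minute) {
//			if atomic.CompareAndSwapInt32(&helperIdle, 1, 0) {
//				wake <- struct{}{} // resume the helper
//			}
//		}
//	}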

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run.  It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	mcall(gosched_m)
}
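
// Illustrative sketch (not part of the original source): a typical use of
// Gosched, yielding between polls of a condition so that other goroutines
// sharing the thread can make progress. The ready flag is hypothetical.
//
//	for atomic.LoadInt32(&ready) == 0 {
//		runtime.Gosched() // give up the processor, then resume polling
//	}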

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip)
	})
}
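
// Illustrative sketch (not part of the original source): the park/ready
// handshake these helpers are built for, in the style of other runtime
// code. The waiter records itself and parks; goparkunlock releases the lock
// atomically with the park, so the waker cannot miss it. exampleLock and
// waitingG are hypothetical.
//
//	// Waiter: record ourselves under the lock, then sleep.
//	lock(&exampleLock)
//	waitingG = getg()
//	goparkunlock(&exampleLock, "example wait", traceEvGoBlock, 1)
//
//	// Waker: take the recorded g out from under the lock and wake it.
//	lock(&exampleLock)
//	gp := waitingG
//	waitingG = nil
//	unlock(&exampleLock)
//	if gp != nil {
//		goready(gp, 1)
//	}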

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
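
// Illustrative sketch (not part of the original source): the same two-level
// caching scheme in miniature, outside the runtime. Each worker keeps a
// small local slice of free objects and touches the shared, mutex-protected
// central list only when the local cache runs dry (or, on release, fills
// up). All names are hypothetical; sync is an assumed import.
//
//	type node struct{ next *node }
//
//	var (
//		centralMu   sync.Mutex
//		centralList *node // singly linked list of free nodes
//	)
//
//	// get refills the local cache to half capacity from the central list,
//	// allocating only if both caches are empty, then pops one node.
//	func get(local *[]*node) *node {
//		if len(*local) == 0 {
//			centralMu.Lock()
//			for len(*local) < cap(*local)/2 && centralList != nil {
//				n := centralList
//				centralList = n.next
//				n.next = nil
//				*local = append(*local, n)
//			}
//			centralMu.Unlock()
//			if len(*local) == 0 {
//				*local = append(*local, new(node))
//			}
//		}
//		n := (*local)[len(*local)-1]
//		(*local)[len(*local)-1] = nil
//		*local = (*local)[:len(*local)-1]
//		return n
//	}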

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}
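
// Illustrative note (not part of the original source): an interface value
// is a (type, data) word pair, and for a func value the data word points to
// a funcval whose first word is the entry PC. funcPC therefore steps past
// the type word (add ptrSize) and dereferences twice. A hypothetical use:
//
//	pc := funcPC(sysmon) // entry PC of the sysmon function
//	_ = pc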

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

var (
	allgs    []*g
	allglock mutex
)

func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}