// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

//go:linkname runtime_init runtime.init
func runtime_init()

// Linked to the user program's generated main.init function
// (runs all package-level init functions).
//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

// Linked to the user-defined main.main function.
//go:linkname main_main main.main
func main_main()

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// The main goroutine.
// This is the function run by the first goroutine the runtime creates.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		// 64-bit machines get a 1 GB maximum stack; 32-bit get 250 MB.
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Record when the world started.
	runtimeInitTime = nanotime()

	// Start the background monitor thread (sysmon), which drives
	// periodic GC forcing and scheduler housekeeping.
	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization. Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	// Run all initialization functions inside the runtime package itself.
	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	// Enable the garbage collector.
	gcenable()

	main_init_done = make(chan bool)
	if iscgo {
		// Sanity-check that all required cgo support functions were
		// registered before notifying the C side that init is done.
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if _cgo_malloc == nil {
			throw("_cgo_malloc missing")
		}
		if _cgo_free == nil {
			throw("_cgo_free missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	// Run the init functions of all user packages.
	// All init functions execute on this single goroutine, and all of
	// them must finish before main.main runs. Their order follows
	// package dependencies, file names, and declaration order; the
	// compiler gives each init function a unique internal name.
	main_init()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}

	// Finally, run the user's main.main function.
	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	// Unreachable in normal operation: crash via nil deref if exit
	// somehow returns.
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

// forcegchelper parks until sysmon decides a forced periodic GC is due,
// starts one, and parks again.
func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		// Mark ourselves idle so sysmon knows it may wake us.
		atomicstore(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		startGC(gcBackgroundMode, true)
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run. It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	// Pin the current M while recording the wait parameters on it.
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	// NOTE(review): stores the func value's word as a raw pointer —
	// relies on the funcval representation; confirm against park_m's use.
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

// goready marks gp runnable again on the system stack; traceskip is
// passed through for the trace event.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip)
	})
}

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache
		// (fill to half the local cache's capacity).
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop the last entry off the P-local cache, clearing the slot so the
	// cache does not retain a pointer to the handed-out sudog.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

//go:nosplit
func releaseSudog(s *sudog) {
	// A sudog returned to the cache must be fully cleaned by the caller;
	// any leftover link or element pointer is a bug.
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		// Build a local list first so sched.sudoglock is held only
		// briefly for the splice.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	// Skip the interface's type word (ptrSize bytes) to reach the data
	// word, then dereference twice to read the entry PC it points at.
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

// called from assembly when an mcall'd function returns (it must not).
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

// lockedOSThread reports whether the current goroutine is locked to its
// OS thread (both the g->m and m->g lock pointers are set).
func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

var (
	// allgs holds every g ever created; protected by allglock.
	allgs    []*g
	allglock mutex
)

// allgadd records a newly created g in allgs and refreshes the
// allg/allglen mirrors under allglock.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	// NOTE(review): allg points at the first slice element; append may
	// move the backing array, so it is refreshed on every add.
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}