// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Forward declarations; the bodies are provided by the compiler/linker:
// runtime_init runs the runtime package's own init work, main_init runs
// the user program's package init functions, and main_main is main.main.
func runtime_init()
func main_init()
func main_main()

// The main goroutine.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if ptrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Start the sysmon background monitor thread; must run on the
	// system stack.
	systemstack(newsysmon)

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		gothrow("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	memstats.enablegc = true // now that runtime is initialized, GC is okay

	// When cgo is linked in, verify every required cgo entry point is
	// present before running user code, so failures are loud and early.
	if iscgo {
		if _cgo_thread_start == nil {
			gothrow("_cgo_thread_start missing")
		}
		if _cgo_malloc == nil {
			gothrow("_cgo_malloc missing")
		}
		if _cgo_free == nil {
			gothrow("_cgo_free missing")
		}
		// setenv/unsetenv hooks are not required on Windows.
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				gothrow("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				gothrow("_cgo_unsetenv missing")
			}
		}
	}

	main_init()

	// Init is done; release the main OS thread unless the program
	// asked to keep it (the deferred unlock is disarmed here).
	needUnlock = false
	unlockOSThread()

	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit.  See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait")
	}

	exit(0)
	// Unreachable: if exit somehow returns, crash via nil dereference.
	for {
		var x *int32
		*x = 0
	}
}

// start forcegc helper goroutine
func init() {
	go forcegchelper()
}

// forcegchelper parks until sysmon wakes it, then forces a GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	forcegc.g.issystem = true
	for {
		lock(&forcegc.lock)
		if forcegc.idle != 0 {
			gothrow("forcegc: phase error")
		}
		atomicstore(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)")
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		gogc(1)
	}
}

//go:nosplit

// Gosched yields the processor, allowing other goroutines to run.  It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		gothrow("gopark: bad g status")
	}
	// Stash the unlock callback and its argument on the M; park_m will
	// invoke them after the G has been switched away from.
	mp.waitlock = lock
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason string) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason)
}

// goready marks gp runnable again; the ready must run on the system stack.
func goready(gp *g) {
	systemstack(func() {
		ready(gp)
	})
}

// acquireSudog returns a sudog for the current goroutine, preferring the
// per-M cache and falling back to a heap allocation.
//go:nosplit
func acquireSudog() *sudog {
	c := gomcache()
	s := c.sudogcache
	if s != nil {
		if s.elem != nil {
			gothrow("acquireSudog: found s.elem != nil in cache")
		}
		c.sudogcache = s.next
		s.next = nil
		return s
	}

	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stoptheworld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	p := new(sudog)
	if p.elem != nil {
		gothrow("acquireSudog: found p.elem != nil after new")
	}
	releasem(mp)
	return p
}

// releaseSudog returns s to the per-M cache.  The caller must have
// cleared every link field first; stale links would corrupt waiters.
//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		gothrow("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		gothrow("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		gothrow("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		gothrow("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		gothrow("runtime: sudog with non-nil waitlink")
	}
	gp := getg()
	if gp.param != nil {
		gothrow("runtime: releaseSudog with non-nil gp.param")
	}
	c := gomcache()
	s.next = c.sudogcache
	c.sudogcache = s
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
//go:nosplit
func funcPC(f interface{}) uintptr {
	// A func value's data word points at a closure whose first word is
	// the code pointer; skip the interface type word to reach it.
	return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
}

// called from assembly
func badmcall(fn func(*g)) {
	gothrow("runtime: mcall called on m->g0 stack")
}

// called from assembly
func badmcall2(fn func(*g)) {
	gothrow("runtime: mcall function returned")
}

// badreflectcall reports an oversized reflect.call argument frame.
func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

// lockedOSThread reports whether the current goroutine is wired to its
// OS thread (both directions of the g<->m lock are set).
func lockedOSThread() bool {
	gp := getg()
	return gp.lockedm != nil && gp.m.lockedg != nil
}

// newP allocates a new P structure.
func newP() *p {
	return new(p)
}

// newM allocates a new M structure.
func newM() *m {
	return new(m)
}

// newG allocates a new G structure.
func newG() *g {
	return new(g)
}

// allgs holds every g ever created; allglock guards appends to it.
var (
	allgs    []*g
	allglock mutex
)

// allgadd records a newly created g in allgs and refreshes the
// C-visible allg/allglen mirrors under allglock.
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		gothrow("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allg = &allgs[0]
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}