// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting and the top 31 bits are the
// gotraceback value.
var traceback_cache uint32 = 2 << 1

// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
// GOTRACEBACK=0 suppress all tracebacks
// GOTRACEBACK=1 default behavior - show tracebacks but exclude runtime frames
// GOTRACEBACK=2 show tracebacks including runtime frames
// GOTRACEBACK=crash show tracebacks including runtime frames, then crash (core dump etc)
//
// gotraceback returns the effective traceback level. If crash is
// non-nil, *crash reports whether the "crash" setting is on. A per-m
// override (m.traceback) takes precedence over the cached GOTRACEBACK
// value; in that case *crash is left false.
//go:nosplit
func gotraceback(crash *bool) int32 {
	_g_ := getg()
	if crash != nil {
		*crash = false
	}
	if _g_.m.traceback != 0 {
		// Per-m override set by the runtime itself; honor it.
		return int32(_g_.m.traceback)
	}
	if crash != nil {
		*crash = traceback_cache&1 != 0 // low bit is the crash flag
	}
	return int32(traceback_cache >> 1) // top 31 bits are the level
}

// Saved copies of the process arguments, recorded by args at startup.
var (
	argc int32
	argv **byte
)

// argv_index returns argv[i] by raw pointer arithmetic on the C-style
// argv vector.
// nosplit for use in linux/386 startup linux_setup_vdso
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
}

// args records the raw argc/argv handed to the process by the OS and
// gives the OS-specific sysargs hook a chance to inspect them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

var (
	// TODO: Retire in favor of GOOS== checks.
	isplan9   int32
	issolaris int32
	iswindows int32
)

// Information about what cpu features are available.
// Set on startup in asm_{x86/amd64}.s.
var (
//cpuid_ecx uint32
//cpuid_edx uint32
)

// goargs converts the C argv vector into the Go argslice.
// On Windows the arguments are obtained elsewhere, so this is a no-op.
func goargs() {
	if GOOS == "windows" {
		return
	}

	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

// goenvs_unix builds the envs slice from the environment strings that
// the kernel places immediately after argv: argv[argc] is the NULL
// terminator, so the environment starts at argv[argc+1] and is itself
// NULL-terminated.
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostringnocopy(argv_index(argv, argc+1+i))
	}
}

// environ returns the environment strings captured at startup.
func environ() []string {
	return envs
}

// testAtomic64 sanity-checks the 64-bit atomic operations
// (cas64, atomicload64, atomicstore64, xadd64, xchg64) at startup
// and throws if any of them misbehaves.
func testAtomic64() {
	var z64, x64 uint64

	z64 = 42
	x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&z64)))
	prefetcht1(uintptr(unsafe.Pointer(&z64)))
	prefetcht2(uintptr(unsafe.Pointer(&z64)))
	prefetchnta(uintptr(unsafe.Pointer(&z64)))
	if cas64(&z64, x64, 1) {
		// Old value (0) does not match z64 (42): cas must fail.
		throw("cas64 failed")
	}
	if x64 != 0 {
		throw("cas64 failed")
	}
	x64 = 42
	if !cas64(&z64, x64, 1) {
		// Now the old value matches: cas must succeed.
		throw("cas64 failed")
	}
	if x64 != 42 || z64 != 1 {
		throw("cas64 failed")
	}
	if atomicload64(&z64) != 1 {
		throw("load64 failed")
	}
	// Values above 2^32 exercise the high word on 32-bit systems.
	atomicstore64(&z64, (1<<40)+1)
	if atomicload64(&z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomicload64(&z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomicload64(&z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

// check verifies, at startup, assumptions shared between the compiler
// and the runtime: basic type sizes, struct layout/padding, timediv,
// 32-bit and pointer compare-and-swap, atomicor8, NaN comparison
// semantics for float32/float64, the 64-bit atomics, and that
// _FixedStack is a power of two. Any failure throws.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != ptrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != ptrSize {
		throw("bad l")
	}
	// y1t must be laid out with y at offset 1 and no padding.
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if cas(&z, 5, 6) {
		// Old value mismatch: cas must fail and leave z untouched.
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Build a non-nil scratch pointer for the casp tests; shift it up
	// on 64-bit systems to exercise addresses above 4GB.
	k = unsafe.Pointer(uintptr(0xfedcb123))
	if ptrSize == 8 {
		k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	// atomicor8 must affect only the addressed byte.
	m = [4]byte{1, 1, 1, 1}
	atomicor8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	// All-ones bit pattern is a NaN; NaN must compare unequal to
	// everything, including itself.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}
}

// dbgVar associates a GODEBUG key with the runtime variable it sets.
type dbgVar struct {
	name  string
	value *int32
}

// TODO(rsc): Make GC respect debug.invalidptr.

// Holds variables parsed from GODEBUG env var.
var debug struct {
	allocfreetrace int32
	efence         int32
	gcdead         int32
	gctrace        int32
	invalidptr     int32
	scavenge       int32
	scheddetail    int32
	schedtrace     int32
	wbshadow       int32
}

// dbgvars maps each recognized GODEBUG key to its field in debug.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"efence", &debug.efence},
	{"gcdead", &debug.gcdead},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}

// parsedebugvars parses GODEBUG (a comma-separated list of key=value
// pairs) into the debug struct, and GOTRACEBACK into traceback_cache
// (level in the top 31 bits, crash flag in the low bit).
func parsedebugvars() {
	for p := gogetenv("GODEBUG"); p != ""; {
		// Peel off the next comma-separated field.
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			// Malformed entry with no '='; skip it.
			continue
		}
		key, value := field[:i], field[i+1:]
		for _, v := range dbgvars {
			if v.name == key {
				*v.value = int32(atoi(value))
			}
		}
	}

	switch p := gogetenv("GOTRACEBACK"); p {
	case "":
		traceback_cache = 1 << 1
	case "crash":
		traceback_cache = 2<<1 | 1
	default:
		traceback_cache = uint32(atoi(p)) << 1
	}
}

// Poor mans 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//
// timediv returns v/div and, if rem is non-nil, stores v%div in *rem,
// using only shifts and subtraction (binary long division over 31
// bits). If the quotient does not fit in 31 bits it saturates to
// 0x7fffffff with *rem set to 0.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		// Quotient would overflow int32; saturate.
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem pins the current goroutine to its m by incrementing the
// m's lock count, and returns that m. Pair every call with releasem.
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

// releasem undoes acquirem. When the last lock is dropped, a pending
// preemption request is re-armed by poisoning stackguard0.
//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// gomcache returns the current m's mcache.
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

// typelink and etypelink bracket the linker-generated typelink
// section containing the runtime's type descriptors.
var typelink, etypelink [0]byte

// reflect_typelinks, linked into package reflect, returns the type
// descriptors as a slice by constructing a slice header over the
// [typelink, etypelink) section.
//go:linkname reflect_typelinks reflect.typelinks
//go:nosplit
func reflect_typelinks() []*_type {
	var ret []*_type
	sp := (*slice)(unsafe.Pointer(&ret))
	sp.array = (*byte)(unsafe.Pointer(&typelink))
	sp.len = uint((uintptr(unsafe.Pointer(&etypelink)) - uintptr(unsafe.Pointer(&typelink))) / unsafe.Sizeof(ret[0]))
	sp.cap = sp.len
	return ret
}

// readgogc parses the GOGC environment variable: unset means the
// default 100, "off" disables GC (-1), anything else is taken as the
// collection-trigger percentage.
// TODO: move back into mgc0.c when converted to Go
func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "" {
		return 100
	}
	if p == "off" {
		return -1
	}
	return int32(atoi(p))
}