github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/runtime/runtime.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <signal.h>
#include <unistd.h>

#include "config.h"

#include "runtime.h"
#include "arch.h"
#include "array.h"

enum {
	maxround = sizeof(uintptr),
};

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting and the top 31 bits are the
// gotraceback value.
static uint32 traceback_cache = ~(uint32)0;

extern volatile intgo runtime_MemProfileRate
  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");

// The GOTRACEBACK environment variable controls the
// behavior of a Go program that is crashing and exiting.
// GOTRACEBACK=0       suppress all tracebacks
// GOTRACEBACK=1       default behavior - show tracebacks but exclude runtime frames
// GOTRACEBACK=2       show tracebacks including runtime frames
// GOTRACEBACK=crash   show tracebacks including runtime frames, then crash (core dump etc)
int32
runtime_gotraceback(bool *crash)
{
	String s;
	const byte *p;
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);
	if(x == ~(uint32)0) {
		s = runtime_getenv("GOTRACEBACK");
		p = s.str;
		if(s.len == 0)
			x = 1<<1;
		else if(s.len == 5 && runtime_strcmp((const char *)p, "crash") == 0)
			x = (2<<1) | 1;
		else
			x = runtime_atoi(p, s.len)<<1;
		runtime_atomicstore(&traceback_cache, x);
	}
	if(crash != nil)
		*crash = x&1;
	return x>>1;
}
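// For reference, a sketch of how the packed encoding above decodes (derived
// from the code; the per-M override via runtime_m()->traceback bypasses it):
//
//	GOTRACEBACK   cached x       x>>1 (level)   x&1 (crash)
//	unset         1<<1 = 2       1              false
//	"0"           0<<1 = 0       0              false
//	"2"           2<<1 = 4       2              false
//	"crash"       (2<<1)|1 = 5   2              true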
static int32	argc;
static byte**	argv;

static Slice args;
Slice envs;

void (*runtime_sysargs)(int32, uint8**);

void
runtime_args(int32 c, byte **v)
{
	argc = c;
	argv = v;
	if(runtime_sysargs != nil)
		runtime_sysargs(c, v);
}

byte*
runtime_progname()
{
	return argc == 0 ? nil : argv[0];
}

void
runtime_goargs(void)
{
	String *s;
	int32 i;

	// for windows implementation see "os" package
	if(Windows)
		return;

	s = runtime_malloc(argc*sizeof s[0]);
	for(i=0; i<argc; i++)
		s[i] = runtime_gostringnocopy((const byte*)argv[i]);
	args.__values = (void*)s;
	args.__count = argc;
	args.__capacity = argc;
}

void
runtime_goenvs_unix(void)
{
	String *s;
	int32 i, n;

	// The environment strings follow the argument vector: argv[argc] is a
	// nil terminator, and the entries from argv[argc+1] up to the next nil
	// pointer are the environment.
	for(n=0; argv[argc+1+n] != 0; n++)
		;

	s = runtime_malloc(n*sizeof s[0]);
	for(i=0; i<n; i++)
		s[i] = runtime_gostringnocopy(argv[argc+1+i]);
	envs.__values = (void*)s;
	envs.__count = n;
	envs.__capacity = n;
}

// Called from the syscall package.
Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");

Slice
runtime_envs()
{
	return envs;
}

Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");

Slice
os_runtime_args()
{
	return args;
}

int32
runtime_atoi(const byte *p, intgo len)
{
	int32 n;

	n = 0;
	while(len > 0 && '0' <= *p && *p <= '9') {
		n = n*10 + *p++ - '0';
		len--;
	}
	return n;
}

static struct root_list runtime_roots =
{ nil,
  { { &envs, sizeof envs },
    { &args, sizeof args },
    { nil, 0 } },
};

static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	if(runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 0)
		runtime_throw("cas64 failed");
	x64 = 42;
	if(!runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime_throw("cas64 failed");
	if(runtime_atomicload64(&z64) != 1)
		runtime_throw("load64 failed");
	runtime_atomicstore64(&z64, (1ull<<40)+1);
	if(runtime_atomicload64(&z64) != (1ull<<40)+1)
		runtime_throw("store64 failed");
	if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_atomicload64(&z64) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime_throw("xchg64 failed");
	if(runtime_atomicload64(&z64) != (3ull<<40)+3)
		runtime_throw("xchg64 failed");
}

void
runtime_check(void)
{
	__go_register_gc_roots(&runtime_roots);

	TestAtomic64();
}

uint32
runtime_fastrand1(void)
{
	M *m;
	uint32 x;

	m = runtime_m();
	x = m->fastrand;
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}

int64
runtime_cputicks(void)
{
#if defined(__i386__) || defined(__x86_64__)
	uint32 low, high;
	asm("rdtsc" : "=a" (low), "=d" (high));
	return (int64)(((uint64)high << 32) | (uint64)low);
#elif defined (__s390__) || defined (__s390x__)
	uint64 clock = 0;
	/* stckf may not write the return variable in case of a clock error, so make
	   it read-write to keep the initialisation from being optimised out.
	   Note: Targets below z9-109 will crash when executing store clock fast, i.e.
	   we don't support Go for machines older than that. */
	asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
	return (int64)clock;
#else
	// FIXME: implement for other processors.
	return 0;
#endif
}
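// Raw values from runtime_cputicks are only meaningful relative to the
// calibration done by runtime_tickspersecond below; as a rough, illustrative
// conversion (not used anywhere in this file), a tick delta maps to
// nanoseconds as:
//
//	ns = (end_ticks - start_ticks) * 1000*1000*1000 / runtime_tickspersecond();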
bool
runtime_showframe(String s, bool current)
{
	static int32 traceback = -1;

	if(current && runtime_m()->throwing > 0)
		return 1;
	if(traceback < 0)
		traceback = runtime_gotraceback(nil);
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}

static Lock ticksLock;
static int64 ticks;

int64
runtime_tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	res = (int64)runtime_atomicload64((uint64*)&ticks);
	if(res != 0)
		return ticks;
	runtime_lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		t0 = runtime_nanotime();
		c0 = runtime_cputicks();
		runtime_usleep(100*1000);
		t1 = runtime_nanotime();
		c1 = runtime_cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime_atomicstore64((uint64*)&ticks, res);
	}
	runtime_unlock(&ticksLock);
	return res;
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
runtime_mpreinit(M *mp)
{
	mp->gsignal = runtime_malg(32*1024, &mp->gsignalstack, &mp->gsignalstacksize);	// OS X wants >=8K, Linux >=2K
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
void
runtime_minit(void)
{
	M* m;
	sigset_t sigs;

	// Initialize signal handling.
	m = runtime_m();
	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
	if (sigemptyset(&sigs) != 0)
		runtime_throw("sigemptyset");
	pthread_sigmask(SIG_SETMASK, &sigs, nil);
}

// Called from dropm to undo the effect of an minit.
void
runtime_unminit(void)
{
	runtime_signalstack(nil, 0);
}

void
runtime_signalstack(byte *p, int32 n)
{
	stack_t st;

	st.ss_sp = p;
	st.ss_size = n;
	st.ss_flags = 0;
	if(p == nil)
		st.ss_flags = SS_DISABLE;
	if(sigaltstack(&st, nil) < 0)
		*(int *)0xf1 = 0xf1;
}

DebugVars	runtime_debug;

// Holds variables parsed from the GODEBUG env var,
// except for "memprofilerate", since there is an
// existing var for that value which is int
// instead of int32 and might have an
// initial value.
static struct {
	const char* name;
	int32*	value;
} dbgvar[] = {
	{"allocfreetrace", &runtime_debug.allocfreetrace},
	{"efence", &runtime_debug.efence},
	{"gctrace", &runtime_debug.gctrace},
	{"gcdead", &runtime_debug.gcdead},
	{"scheddetail", &runtime_debug.scheddetail},
	{"schedtrace", &runtime_debug.schedtrace},
};
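// For example, a setting such as
//
//	GODEBUG=gctrace=1,schedtrace=1000,memprofilerate=0
//
// is handled by runtime_parsedebugvars below: each comma-separated name=value
// pair either updates the matching dbgvar entry above or, in the case of
// "memprofilerate", sets runtime_MemProfileRate directly.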
void
runtime_parsedebugvars(void)
{
	String s;
	const byte *p, *pn;
	intgo len;
	intgo i, n;
	bool tmp;

	// gotraceback caches the GOTRACEBACK setting in traceback_cache.
	// gotraceback can be called before the environment is available.
	// traceback_cache must be reset after the environment is made
	// available, in order for the environment variable to take effect.
	// The code is fixed differently in Go 1.4.
	// This is a limited fix for Go 1.3.3.
	traceback_cache = ~(uint32)0;
	runtime_gotraceback(&tmp);

	s = runtime_getenv("GODEBUG");
	if(s.len == 0)
		return;
	p = s.str;
	len = s.len;
	for(;;) {
		for(i=0; i<(intgo)nelem(dbgvar); i++) {
			n = runtime_findnull((const byte*)dbgvar[i].name);
			if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
				// Set the MemProfileRate directly since it
				// is an int, not int32, and should only be
				// set here if specified by GODEBUG.
				runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
			else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
				*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
		}
		pn = (const byte *)runtime_strstr((const char *)p, ",");
		if(pn == nil || pn - p >= len)
			break;
		// Skip past the segment just handled and the comma that follows it.
		len -= (pn - p) + 1;
		p = pn + 1;
	}
}

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
int32
runtime_timediv(int64 v, int32 div, int32 *rem)
{
	int32 res, bit;

	if(v >= (int64)div*0x7fffffffLL) {
		if(rem != nil)
			*rem = 0;
		return 0x7fffffff;
	}
	res = 0;
	for(bit = 30; bit >= 0; bit--) {
		if(v >= ((int64)div<<bit)) {
			v = v - ((int64)div<<bit);
			res += 1<<bit;
		}
	}
	if(rem != nil)
		*rem = v;
	return res;
}

// Setting the max stack size doesn't really do anything for gccgo.

uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real

void memclrBytes(Slice)
     __asm__ (GOSYM_PREFIX "runtime.memclrBytes");

void
memclrBytes(Slice s)
{
	runtime_memclr(s.__values, s.__count);
}
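// A worked example of runtime_timediv above (illustrative only): dividing
// 1500000000 by 1000000000 yields 1 with remainder 500000000, while any
// quotient that would not fit in an int32 is clamped to 0x7fffffff and the
// remainder is reported as 0.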