github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/os_darwin.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"unsafe"
)

type mOS struct {
	initialized bool
	mutex       pthreadmutex
	cond        pthreadcond
	count       int
}

func unimplemented(name string) {
	println(name, "not implemented")
	*(*int)(unsafe.Pointer(uintptr(1231))) = 1231
}

//go:nosplit
func semacreate(mp *m) {
	if mp.initialized {
		return
	}
	mp.initialized = true
	if err := pthread_mutex_init(&mp.mutex, nil); err != 0 {
		throw("pthread_mutex_init")
	}
	if err := pthread_cond_init(&mp.cond, nil); err != 0 {
		throw("pthread_cond_init")
	}
}

//go:nosplit
func semasleep(ns int64) int32 {
	var start int64
	if ns >= 0 {
		start = nanotime()
	}
	mp := getg().m
	pthread_mutex_lock(&mp.mutex)
	for {
		if mp.count > 0 {
			mp.count--
			pthread_mutex_unlock(&mp.mutex)
			return 0
		}
		if ns >= 0 {
			spent := nanotime() - start
			if spent >= ns {
				pthread_mutex_unlock(&mp.mutex)
				return -1
			}
			var t timespec
			t.setNsec(ns - spent)
			err := pthread_cond_timedwait_relative_np(&mp.cond, &mp.mutex, &t)
			if err == _ETIMEDOUT {
				pthread_mutex_unlock(&mp.mutex)
				return -1
			}
		} else {
			pthread_cond_wait(&mp.cond, &mp.mutex)
		}
	}
}

//go:nosplit
func semawakeup(mp *m) {
	pthread_mutex_lock(&mp.mutex)
	mp.count++
	if mp.count > 0 {
		pthread_cond_signal(&mp.cond)
	}
	pthread_mutex_unlock(&mp.mutex)
}

// The read and write file descriptors used by the sigNote functions.
var sigNoteRead, sigNoteWrite int32

// sigNoteSetup initializes an async-signal-safe note.
//
// The current implementation of notes on Darwin is not async-signal-safe,
// because the functions pthread_mutex_lock, pthread_cond_signal, and
// pthread_mutex_unlock, called by semawakeup, are not async-signal-safe.
// There is only one case where we need to wake up a note from a signal
// handler: the sigsend function. The signal handler code does not require
// all the features of notes: it does not need to do a timed wait.
// This is a separate implementation of notes, based on a pipe, that does
// not support timed waits but is async-signal-safe.
func sigNoteSetup(*note) {
	if sigNoteRead != 0 || sigNoteWrite != 0 {
		throw("duplicate sigNoteSetup")
	}
	var errno int32
	sigNoteRead, sigNoteWrite, errno = pipe()
	if errno != 0 {
		throw("pipe failed")
	}
	closeonexec(sigNoteRead)
	closeonexec(sigNoteWrite)

	// Make the write end of the pipe non-blocking, so that if the pipe
	// buffer is somehow full we will not block in the signal handler.
	// Leave the read end of the pipe blocking so that we will block
	// in sigNoteSleep.
	setNonblock(sigNoteWrite)
}

// sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.
func sigNoteWakeup(*note) {
	var b byte
	write(uintptr(sigNoteWrite), unsafe.Pointer(&b), 1)
}

// sigNoteSleep waits for a note created by sigNoteSetup to be woken.
func sigNoteSleep(*note) {
	for {
		var b byte
		entersyscallblock()
		n := read(sigNoteRead, unsafe.Pointer(&b), 1)
		exitsyscall()
		if n != -_EINTR {
			return
		}
	}
}

// BSD interface for threading.

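// osinit records the number of CPUs and the physical page size for the
// rest of the runtime to use.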
func osinit() {
	// pthread_create delayed until end of goenvs so that we
	// can look at the environment first.

	ncpu = getncpu()
	physPageSize = getPageSize()
}

func sysctlbynameInt32(name []byte) (int32, int32) {
	out := int32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	return ret, out
}

//go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname
func internal_cpu_getsysctlbyname(name []byte) (int32, int32) {
	return sysctlbynameInt32(name)
}

const (
	_CTL_HW      = 6
	_HW_NCPU     = 3
	_HW_PAGESIZE = 7
)

func getncpu() int32 {
	// Use sysctl to fetch hw.ncpu.
	mib := [2]uint32{_CTL_HW, _HW_NCPU}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return int32(out)
	}
	return 1
}

func getPageSize() uintptr {
	// Use sysctl to fetch hw.pagesize.
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 && int32(out) > 0 {
		return uintptr(out)
	}
	return 0
}

var urandom_dev = []byte("/dev/urandom\x00")

//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrierrec
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Initialize an attribute object.
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Find out OS stack size for our own stack guard.
	var stacksize uintptr
	if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	mp.g0.stack.hi = stacksize // for mstart

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, abi.FuncPCABI0(mstart_stub), unsafe.Pointer(mp))
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

// glue code to call mstart from pthread_create.
func mstart_stub()

// newosproc0 is a version of newosproc that can be called before the runtime
// is initialized.
//
// This function is not safe to use after initialization as it does not pass an M as fnarg.
//
//go:nosplit
func newosproc0(stacksize uintptr, fn uintptr) {
	// Initialize an attribute object.
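	// As in newosproc, any failure below writes failthreadcreate to
	// standard error and exits.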
	var attr pthreadattr
	var err int32
	err = pthread_attr_init(&attr)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// The caller passes in a suggested stack size,
	// from when we allocated the stack and thread ourselves,
	// without libpthread. Now that we're using libpthread,
	// we use the OS default stack size instead of the suggestion.
	// Find out that stack size for our own stack guard.
	if pthread_attr_getstacksize(&attr, &stacksize) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
	g0.stack.hi = stacksize // for mstart
	memstats.stacks_sys.add(int64(stacksize))

	// Tell the pthread library we won't join with this thread.
	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}

	// Finally, create the thread. It starts at mstart_stub, which does some low-level
	// setup and then calls mstart.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	err = pthread_create(&attr, fn, nil)
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if err != 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")

// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // OS X wants >= 8K
	mp.gsignal.m = mp
	if GOOS == "darwin" && GOARCH == "arm64" {
		// mlock the signal stack to work around a kernel bug where it may
		// SIGILL when the signal stack is not faulted in while a signal
		// arrives. See issue 42774.
		mlock(unsafe.Pointer(mp.gsignal.stack.hi-physPageSize), physPageSize)
	}
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	// iOS does not support alternate signal stack.
	// The signal handler handles it directly.
	if !(GOOS == "ios" && GOARCH == "arm64") {
		minitSignalStack()
	}
	minitSignalMask()
	getg().m.procid = uint64(pthread_self())
}

// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	// iOS does not support alternate signal stack.
	// See minit.
	if !(GOOS == "ios" && GOARCH == "arm64") {
		unminitSignals()
	}
}

// Called from exitm, but not from drop, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
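// On darwin this is currently a no-op.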
func mdestroy(mp *m) {
}

//go:nosplit
func osyield_no_g() {
	usleep_no_g(1)
}

//go:nosplit
func osyield() {
	usleep(1)
}

const (
	_NSIG        = 32
	_SI_USER     = 0 /* empirically true, but not what headers say */
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_SS_DISABLE  = 4
)

//extern SigTabTT runtime·sigtab[];

type sigset uint32

var sigset_all = ^sigset(0)

//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa usigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = ^uint32(0)
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		if iscgo {
			fn = abi.FuncPCABI0(cgoSigtramp)
		} else {
			fn = abi.FuncPCABI0(sigtramp)
		}
	}
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = fn
	sigaction(i, &sa, nil)
}

// sigtramp is the callback from libc when a signal is received.
// It is called with the C calling convention.
func sigtramp()
func cgoSigtramp()

//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var osa usigactiont
	sigaction(i, nil, &osa)
	handler := *(*uintptr)(unsafe.Pointer(&osa.__sigaction_u))
	if osa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	var sa usigactiont
	*(*uintptr)(unsafe.Pointer(&sa.__sigaction_u)) = handler
	sa.sa_mask = osa.sa_mask
	sa.sa_flags = osa.sa_flags | _SA_ONSTACK
	sigaction(i, &sa, nil)
}

//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa usigactiont
	sigaction(i, nil, &sa)
	return *(*uintptr)(unsafe.Pointer(&sa.__sigaction_u))
}

// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	*mask |= 1 << (uint32(i) - 1)
}

func sigdelset(mask *sigset, i int) {
	*mask &^= 1 << (uint32(i) - 1)
}

func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}

//go:linkname executablePath os.executablePath
var executablePath string

func sysargs(argc int32, argv **byte) {
	// skip over argv and envv; the first string after them is the path
	n := argc + 1
	for argv_index(argv, n) != nil {
		n++
	}
	executablePath = gostringnocopy(argv_index(argv, n+1))

	// strip "executable_path=" prefix if present; it's added after OS X 10.11.
	const prefix = "executable_path="
	if len(executablePath) > len(prefix) && executablePath[:len(prefix)] == prefix {
		executablePath = executablePath[len(prefix):]
	}
}

func signalM(mp *m, sig int) {
	pthread_kill(pthread(mp.procid), uint32(sig))
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}