github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/os_netbsd.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"unsafe"
)

const (
	_SS_DISABLE  = 4
	_SIG_BLOCK   = 1
	_SIG_UNBLOCK = 2
	_SIG_SETMASK = 3
	_NSIG        = 33
	_SI_USER     = 0

	// From NetBSD's <sys/ucontext.h>
	_UC_SIGMASK = 0x01
	_UC_CPU     = 0x04

	// From <sys/lwp.h>
	_LWP_DETACHED = 0x00000040
)

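// mOS holds the NetBSD-specific fields of an m. waitsemacount is the
// counter for the per-thread semaphore implemented by semasleep and
// semawakeup.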
type mOS struct {
	waitsemacount uint32
}

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func sigaction(sig uint32, new, old *sigactiont)

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func sigprocmask(how int32, new, old *sigset)

//go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32

func lwp_tramp()

func raiseproc(sig uint32)

func lwp_kill(tid int32, sig int)

//go:noescape
func getcontext(ctxt unsafe.Pointer)

//go:noescape
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32

//go:noescape
func lwp_park(clockid, flags int32, ts *timespec, unpark int32, hint, unparkhint unsafe.Pointer) int32

//go:noescape
func lwp_unpark(lwp int32, hint unsafe.Pointer) int32

func lwp_self() int32

func osyield()

//go:nosplit
func osyield_no_g() {
	osyield()
}

func kqueue() int32

//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32

func pipe2(flags int32) (r, w int32, errno int32)
func closeonexec(fd int32)

const (
	_ESRCH     = 3
	_ETIMEDOUT = 60

	// From NetBSD's <sys/time.h>
	_CLOCK_REALTIME  = 0
	_CLOCK_VIRTUAL   = 1
	_CLOCK_PROF      = 2
	_CLOCK_MONOTONIC = 3

	_TIMER_RELTIME = 0
	_TIMER_ABSTIME = 1
)

var sigset_all = sigset{[4]uint32{^uint32(0), ^uint32(0), ^uint32(0), ^uint32(0)}}

// From NetBSD's <sys/sysctl.h>
const (
	_CTL_KERN   = 1
	_KERN_OSREV = 3

	_CTL_HW        = 6
	_HW_NCPU       = 3
	_HW_PAGESIZE   = 7
	_HW_NCPUONLINE = 16
)

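// sysctlInt returns the integer value of the sysctl variable named by mib,
// reporting whether the sysctl call succeeded.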
func sysctlInt(mib []uint32) (int32, bool) {
	var out int32
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], uint32(len(mib)), (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret < 0 {
		return 0, false
	}
	return out, true
}

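// getncpu returns the number of online CPUs (_HW_NCPUONLINE), falling back
// to the configured CPU count (_HW_NCPU) and finally to 1.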
func getncpu() int32 {
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPUONLINE}); ok {
		return int32(n)
	}
	if n, ok := sysctlInt([]uint32{_CTL_HW, _HW_NCPU}); ok {
		return int32(n)
	}
	return 1
}

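// getPageSize returns the system page size (_HW_PAGESIZE), or 0 if the
// sysctl fails.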
func getPageSize() uintptr {
	mib := [2]uint32{_CTL_HW, _HW_PAGESIZE}
	out := uint32(0)
	nout := unsafe.Sizeof(out)
	ret := sysctl(&mib[0], 2, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0)
	if ret >= 0 {
		return uintptr(out)
	}
	return 0
}

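// getOSRev returns the kernel revision (_KERN_OSREV), or 0 if the sysctl
// fails.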
func getOSRev() int {
	if osrev, ok := sysctlInt([]uint32{_CTL_KERN, _KERN_OSREV}); ok {
		return int(osrev)
	}
	return 0
}

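// semacreate is a no-op on NetBSD: the per-thread semaphore is simply the
// waitsemacount field of mOS, so there is nothing to allocate.
//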
//go:nosplit
func semacreate(mp *m) {
}

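// semasleep waits for the m's semaphore count to become positive and
// decrements it, parking the LWP with lwp_park while it waits. It returns
// 0 once the semaphore is acquired, or -1 if ns >= 0 and the deadline
// passes first.
//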
//go:nosplit
func semasleep(ns int64) int32 {
	gp := getg()
	var deadline int64
	if ns >= 0 {
		deadline = nanotime() + ns
	}

	for {
		v := atomic.Load(&gp.m.waitsemacount)
		if v > 0 {
			if atomic.Cas(&gp.m.waitsemacount, v, v-1) {
				return 0 // semaphore acquired
			}
			continue
		}

		// Sleep until unparked by semawakeup or timeout.
		var tsp *timespec
		var ts timespec
		if ns >= 0 {
			wait := deadline - nanotime()
			if wait <= 0 {
				return -1
			}
			ts.setNsec(wait)
			tsp = &ts
		}
		ret := lwp_park(_CLOCK_MONOTONIC, _TIMER_RELTIME, tsp, 0, unsafe.Pointer(&gp.m.waitsemacount), nil)
		if ret == _ETIMEDOUT {
			return -1
		}
	}
}

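// semawakeup increments mp's semaphore count and unparks the LWP, if any,
// that is sleeping on it in semasleep.
//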
//go:nosplit
func semawakeup(mp *m) {
	atomic.Xadd(&mp.waitsemacount, 1)
	// From NetBSD's _lwp_unpark(2) manual:
	// "If the target LWP is not currently waiting, it will return
	// immediately upon the next call to _lwp_park()."
	ret := lwp_unpark(int32(mp.procid), unsafe.Pointer(&mp.waitsemacount))
	if ret != 0 && ret != _ESRCH {
		// semawakeup can be called on signal stack.
		systemstack(func() {
			print("thrwakeup addr=", &mp.waitsemacount, " sem=", mp.waitsemacount, " ret=", ret, "\n")
		})
	}
}

// May run with m.p==nil, so write barriers are not allowed.
//
//go:nowritebarrier
func newosproc(mp *m) {
	stk := unsafe.Pointer(mp.g0.stack.hi)
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " id=", mp.id, " ostk=", &mp, "\n")
	}

	var uc ucontextt
	getcontext(unsafe.Pointer(&uc))

	// _UC_SIGMASK does not seem to work here.
	// It would be nice if _UC_SIGMASK and _UC_STACK
	// worked so that we could do all the work setting
	// the sigmask and the stack here, instead of setting
	// the mask here and the stack in netbsdMstart.
	// For now do the blocking manually.
	uc.uc_flags = _UC_SIGMASK | _UC_CPU
	uc.uc_link = nil
	uc.uc_sigmask = sigset_all

	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)

	lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, abi.FuncPCABI0(netbsdMstart))

	ret := retryOnEAGAIN(func() int32 {
		errno := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
		// lwp_create returns negative errno
		return -errno
	})
	sigprocmask(_SIG_SETMASK, &oset, nil)
	if ret != 0 {
		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", ret, ")\n")
		if ret == _EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -p)")
		}
		throw("runtime.newosproc")
	}
}

// mstart is the entry-point for new Ms.
// It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.
func netbsdMstart()

// netbsdMstart0 is the function call that starts executing a newly
// created thread. On NetBSD, a new thread inherits the signal stack
// of the creating thread. That confuses minit, so we remove that
// signal stack here before calling the regular mstart. It's a bit
// baroque to remove a signal stack here only to add one in minit, but
// it's a simple change that keeps NetBSD working like other OS's.
// At this point all signals are blocked, so there is no race.
//
//go:nosplit
func netbsdMstart0() {
	st := stackt{ss_flags: _SS_DISABLE}
	sigaltstack(&st, nil)
	mstart0()
}

func osinit() {
	ncpu = getncpu()
	if physPageSize == 0 {
		physPageSize = getPageSize()
	}
	needSysmonWorkaround = getOSRev() < 902000000 // NetBSD 9.2
}

var urandom_dev = []byte("/dev/urandom\x00")

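// getRandomData fills r with random bytes read from /dev/urandom.
//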
//go:nosplit
func getRandomData(r []byte) {
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024)
	mp.gsignal.m = mp
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	gp := getg()
	gp.m.procid = uint64(lwp_self())

	// On NetBSD a thread created by pthread_create inherits the
	// signal stack of the creating thread. We always create a
	// new signal stack here, to avoid having two Go threads using
	// the same signal stack. This breaks the case of a thread
	// created in C that calls sigaltstack and then calls a Go
	// function, because we will lose track of the C code's
	// sigaltstack, but it's the best we can do.
	signalstack(&gp.m.gsignal.stack)
	gp.m.newSigstack = true

	minitSignalMask()
}

// Called from dropm to undo the effect of an minit.
//
//go:nosplit
func unminit() {
	unminitSignals()
}

// Called from exitm, but not from drop, to undo the effect of thread-owned
// resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m) {
}

func sigtramp()

type sigactiont struct {
	sa_sigaction uintptr
	sa_mask      sigset
	sa_flags     int32
}

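// setsig installs fn as the handler for signal i. If fn is the Go signal
// handler (sighandler), the ABI0 assembly trampoline sigtramp is installed
// in its place.
//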
//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTART
	sa.sa_mask = sigset_all
	if fn == abi.FuncPCABIInternal(sighandler) { // abi.FuncPCABIInternal(sighandler) matches the callers in signal_unix.go
		fn = abi.FuncPCABI0(sigtramp)
	}
	sa.sa_sigaction = fn
	sigaction(i, &sa, nil)
}

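// setsigstack is not implemented on NetBSD.
//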
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	throw("setsigstack")
}

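// getsig returns the current handler for signal i.
//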
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	sigaction(i, nil, &sa)
	return sa.sa_sigaction
}

// setSignalstackSP sets the ss_sp field of a stackt.
//
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	s.ss_sp = sp
}

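// sigaddset adds signal i to mask.
//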
//go:nosplit
//go:nowritebarrierrec
func sigaddset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
}

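// sigdelset removes signal i from mask.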
func sigdelset(mask *sigset, i int) {
	mask.__bits[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
}

//go:nosplit
func (c *sigctxt) fixsigcode(sig uint32) {
}

func setProcessCPUProfiler(hz int32) {
	setProcessCPUProfilerTimer(hz)
}

func setThreadCPUProfiler(hz int32) {
	setThreadCPUProfilerHz(hz)
}

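// validSIGPROF always reports true: profiling on NetBSD relies on the
// process-wide setitimer timer, so there is no per-thread timer to
// cross-check against.
//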
//go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool {
	return true
}

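// sysargs locates the ELF auxiliary vector, which follows argv and envp on
// the initial stack, and hands it to sysauxv.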
func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
	pairs := sysauxv(auxvp[:])
	auxv = auxvp[: pairs*2 : pairs*2]
}

const (
	_AT_NULL   = 0 // Terminates the vector
	_AT_PAGESZ = 6 // Page size in bytes
)

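// sysauxv scans auxiliary-vector entries up to _AT_NULL, recording the
// system page size, and returns the number of tag/value pairs seen.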
func sysauxv(auxv []uintptr) (pairs int) {
	var i int
	for i = 0; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_PAGESZ:
			physPageSize = val
		}
	}
	return i / 2
}

// raise sends signal to the calling thread.
//
// It must be nosplit because it is used by the signal handler before
// it definitely has a Go stack.
//
//go:nosplit
func raise(sig uint32) {
	lwp_kill(lwp_self(), int(sig))
}

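// signalM sends sig to the LWP running mp.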
func signalM(mp *m, sig int) {
	lwp_kill(int32(mp.procid), sig)
}

// sigPerThreadSyscall is only used on linux, so we assign a bogus signal
// number.
const sigPerThreadSyscall = 1 << 31

//go:nosplit
func runPerThreadSyscall() {
	throw("runPerThreadSyscall only valid on linux")
}