github.com/s1s1ty/go@v0.0.0-20180207192209-104445e3140f/src/runtime/os_linux.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

type mOS struct{}

//go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32

// Linux futex.
//
//	futexsleep(uint32 *addr, uint32 val)
//	futexwakeup(uint32 *addr)
//
// Futexsleep atomically checks if *addr == val and if so, sleeps on addr.
// Futexwakeup wakes up threads sleeping on addr.
// Futexsleep is allowed to wake up spuriously.
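//
// As a rough illustration (a simplified sketch, not the runtime's actual
// implementation; see lock_futex.go, which adds spinning and a separate
// "sleeping" state to avoid needless wakeups), a minimal two-state lock can
// be built from these two primitives, with key == 0 meaning unlocked and
// key == 1 meaning locked:
//
//	// lock:
//	for atomic.Xchg(&key, 1) != 0 {
//		futexsleep(&key, 1, -1)
//	}
//	// unlock:
//	atomic.Xchg(&key, 0)
//	futexwakeup(&key, 1)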

const (
	_FUTEX_WAIT = 0
	_FUTEX_WAKE = 1
)

// Atomically,
//	if(*addr == val) sleep
// Might be woken up spuriously; that's allowed.
// Don't sleep longer than ns; ns < 0 means forever.
//go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64) {
	var ts timespec

	// Some Linux kernels have a bug where a FUTEX_WAIT call
	// returns an internal error code as an errno. Libpthread
	// ignores the return value here, and so can we: as it says
	// a few lines up, spurious wakeups are allowed.
	if ns < 0 {
		futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, nil, nil, 0)
		return
	}

	// It's difficult to live within the no-split stack limits here.
	// On ARM and 386, a 64-bit divide invokes a general software routine
	// that needs more stack than we can afford. So we use timediv instead.
	// But on real 64-bit systems, where words are larger but the stack limit
	// is not, even timediv is too heavy, and we really need to use just an
	// ordinary machine instruction.
	if sys.PtrSize == 8 {
		ts.set_sec(ns / 1000000000)
		ts.set_nsec(int32(ns % 1000000000))
	} else {
		ts.tv_nsec = 0
		ts.set_sec(int64(timediv(ns, 1000000000, (*int32)(unsafe.Pointer(&ts.tv_nsec)))))
	}
	futex(unsafe.Pointer(addr), _FUTEX_WAIT, val, unsafe.Pointer(&ts), nil, 0)
}

// If any procs are sleeping on addr, wake up at most cnt.
//go:nosplit
func futexwakeup(addr *uint32, cnt uint32) {
	ret := futex(unsafe.Pointer(addr), _FUTEX_WAKE, cnt, nil, nil, 0)
	if ret >= 0 {
		return
	}

	// I don't know that futex wakeup can return
	// EAGAIN or EINTR, but if it does, it would be
	// safe to loop and call futex again.
	systemstack(func() {
		print("futexwakeup addr=", addr, " returned ", ret, "\n")
	})

	// Deliberately crash by writing to a recognizable bad address.
	*(*int32)(unsafe.Pointer(uintptr(0x1006))) = 0x1006
}

func getproccount() int32 {
	// This buffer is huge (8 kB) but we are on the system stack
	// and there should be plenty of space (64 kB).
	// Also this is a leaf, so we're not holding up the memory for long.
	// See golang.org/issue/11823.
	// The suggested behavior here is to keep trying with ever-larger
	// buffers, but we don't have a dynamic memory allocator at the
	// moment, so that's a bit tricky and seems like overkill.
	const maxCPUs = 64 * 1024
	var buf [maxCPUs / 8]byte
	r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
	if r < 0 {
		return 1
	}
	n := int32(0)
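	// Count the set bits: each bit in the affinity mask stands for one CPU
	// this process may run on. For example, r == 8 with buf[0] == 0x0f and
	// the remaining bytes zero means CPUs 0-3 are available, so n becomes 4.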
	for _, v := range buf[:r] {
		for v != 0 {
			n += int32(v & 1)
			v >>= 1
		}
	}
	if n == 0 {
		n = 1
	}
	return n
}

// Clone, the Linux rfork.
const (
	_CLONE_VM             = 0x100
	_CLONE_FS             = 0x200
	_CLONE_FILES          = 0x400
	_CLONE_SIGHAND        = 0x800
	_CLONE_PTRACE         = 0x2000
	_CLONE_VFORK          = 0x4000
	_CLONE_PARENT         = 0x8000
	_CLONE_THREAD         = 0x10000
	_CLONE_NEWNS          = 0x20000
	_CLONE_SYSVSEM        = 0x40000
	_CLONE_SETTLS         = 0x80000
	_CLONE_PARENT_SETTID  = 0x100000
	_CLONE_CHILD_CLEARTID = 0x200000
	_CLONE_UNTRACED       = 0x800000
	_CLONE_CHILD_SETTID   = 0x1000000
	_CLONE_STOPPED        = 0x2000000
	_CLONE_NEWUTS         = 0x4000000
	_CLONE_NEWIPC         = 0x8000000

	cloneFlags = _CLONE_VM | /* share memory */
		_CLONE_FS | /* share cwd, etc */
		_CLONE_FILES | /* share fd table */
		_CLONE_SIGHAND | /* share sig handler table */
		_CLONE_SYSVSEM | /* share SysV semaphore undo lists (see issue #20763) */
		_CLONE_THREAD /* revisit - okay for now */
)

//go:noescape
func clone(flags int32, stk, mp, gp, fn unsafe.Pointer) int32

// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newosproc(mp *m, stk unsafe.Pointer) {
	/*
	 * note: strace gets confused if we use CLONE_PTRACE here.
	 */
	if false {
		print("newosproc stk=", stk, " m=", mp, " g=", mp.g0, " clone=", funcPC(clone), " id=", mp.id, " ostk=", &mp, "\n")
	}

	// Disable signals during clone, so that the new thread starts
	// with signals disabled. It will enable them in minit.
	var oset sigset
	sigprocmask(_SIG_SETMASK, &sigset_all, &oset)
	ret := clone(cloneFlags, stk, unsafe.Pointer(mp), unsafe.Pointer(mp.g0), unsafe.Pointer(funcPC(mstart)))
	sigprocmask(_SIG_SETMASK, &oset, nil)

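	// The clone wrapper returns the new thread's ID on success; a negative
	// return value is the negated errno, which is why -ret is printed as
	// the errno below.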
	if ret < 0 {
		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
		if ret == -_EAGAIN {
			println("runtime: may need to increase max user processes (ulimit -u)")
		}
		throw("newosproc")
	}
}

// Version of newosproc that doesn't require a valid G.
//go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer) {
	stack := sysAlloc(stacksize, &memstats.stacks_sys)
	if stack == nil {
		write(2, unsafe.Pointer(&failallocatestack[0]), int32(len(failallocatestack)))
		exit(1)
	}
	ret := clone(cloneFlags, unsafe.Pointer(uintptr(stack)+stacksize), nil, nil, fn)
	if ret < 0 {
		write(2, unsafe.Pointer(&failthreadcreate[0]), int32(len(failthreadcreate)))
		exit(1)
	}
}

var failallocatestack = []byte("runtime: failed to allocate stack for the new OS thread\n")
var failthreadcreate = []byte("runtime: failed to create new OS thread\n")

const (
	_AT_NULL   = 0  // End of vector
	_AT_PAGESZ = 6  // System physical page size
	_AT_HWCAP  = 16 // hardware capability bit vector
	_AT_RANDOM = 25 // introduced in 2.6.29
	_AT_HWCAP2 = 26 // hardware capability bit vector 2
)

var procAuxv = []byte("/proc/self/auxv\x00")

func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

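// sysargs locates the ELF auxiliary vector. The kernel lays out the initial
// stack as argc, the argv pointers, a NULL, the envp pointers, another NULL,
// and then the auxv entries, so the vector is found by walking past the two
// NULL-terminated pointer arrays.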
func sysargs(argc int32, argv **byte) {
	n := argc + 1

	// skip over argv, envp to get to auxv
	for argv_index(argv, n) != nil {
		n++
	}

	// skip NULL separator
	n++

	// now argv+n is auxv
	auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
	if sysauxv(auxv[:]) != 0 {
		return
	}
	// In some situations we don't get a loader-provided
	// auxv, such as when loaded as a library on Android.
	// Fall back to /proc/self/auxv.
	fd := open(&procAuxv[0], 0 /* O_RDONLY */, 0)
	if fd < 0 {
		// On Android, /proc/self/auxv might be unreadable (issue 9229), so we fall
		// back to probing the physical page size with mincore, which should return
		// EINVAL when the address is not a multiple of the system page size.
		const size = 256 << 10 // size of memory region to allocate
		p, err := mmap(nil, size, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
		if err != 0 {
			return
		}
		var n uintptr
		for n = 4 << 10; n < size; n <<= 1 {
			err := mincore(unsafe.Pointer(uintptr(p)+n), 1, &addrspace_vec[0])
			if err == 0 {
				physPageSize = n
				break
			}
		}
		if physPageSize == 0 {
			physPageSize = size
		}
		munmap(p, size)
		return
	}
	var buf [128]uintptr
	n = read(fd, noescape(unsafe.Pointer(&buf[0])), int32(unsafe.Sizeof(buf)))
	closefd(fd)
	if n < 0 {
		return
	}
	// Make sure buf is terminated, even if we didn't read
	// the whole file.
	buf[len(buf)-2] = _AT_NULL
	sysauxv(buf[:])
}

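// sysauxv walks an auxiliary vector: a flat array of (tag, value) pairs
// terminated by an _AT_NULL tag. It records the values the runtime cares
// about and returns the number of pairs processed.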
func sysauxv(auxv []uintptr) int {
	var i int
	for ; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		switch tag {
		case _AT_RANDOM:
			// The kernel provides a pointer to 16 bytes
			// of random data.
			startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]

		case _AT_PAGESZ:
			physPageSize = val
		}

		archauxv(tag, val)
	}
	return i / 2
}

func osinit() {
	ncpu = getproccount()
}

var urandom_dev = []byte("/dev/urandom\x00")

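// getRandomData fills r with random bytes, preferring the 16 bytes of
// startup random data the kernel supplies via the _AT_RANDOM auxv entry
// and falling back to reading /dev/urandom.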
func getRandomData(r []byte) {
	if startupRandomData != nil {
		n := copy(r, startupRandomData)
		extendRandom(r, n)
		return
	}
	fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
	n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
	closefd(fd)
	extendRandom(r, int(n))
}

func goenvs() {
	goenvs_unix()
}

// Called to do synchronous initialization of Go code built with
// -buildmode=c-archive or -buildmode=c-shared.
// None of the Go runtime is initialized.
//go:nosplit
//go:nowritebarrierrec
func libpreinit() {
	initsig(true)
}

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
	mp.gsignal = malg(32 * 1024) // Linux wants >= 2K
	mp.gsignal.m = mp
}

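// gettid returns the current OS thread's ID (the Linux gettid syscall);
// it is implemented in assembly.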
func gettid() uint32

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, cannot allocate memory.
func minit() {
	minitSignals()

	// for debuggers, in case cgo created the thread
	getg().m.procid = uint64(gettid())
}

// Called from dropm to undo the effect of an minit.
//go:nosplit
func unminit() {
	unminitSignals()
}

func memlimit() uintptr {
	/*
		TODO: Convert to Go when something actually uses the result.

		Rlimit rl;
		extern byte runtime·text[], runtime·end[];
		uintptr used;

		if(runtime·getrlimit(RLIMIT_AS, &rl) != 0)
			return 0;
		if(rl.rlim_cur >= 0x7fffffff)
			return 0;

		// Estimate our VM footprint excluding the heap.
		// Not an exact science: use size of binary plus
		// some room for thread stacks.
		used = runtime·end - runtime·text + (64<<20);
		if(used >= rl.rlim_cur)
			return 0;

		// If there's not at least 16 MB left, we're probably
		// not going to be able to do much. Treat as no limit.
		rl.rlim_cur -= used;
		if(rl.rlim_cur < (16<<20))
			return 0;

		return rl.rlim_cur - used;
	*/

	return 0
}

//#ifdef GOARCH_386
//#define sa_handler k_sa_handler
//#endif

func sigreturn()
func sigtramp(sig uint32, info *siginfo, ctx unsafe.Pointer)
func cgoSigtramp()

//go:noescape
func sigaltstack(new, old *stackt)

//go:noescape
func setitimer(mode int32, new, old *itimerval)

//go:noescape
func rtsigprocmask(how int32, new, old *sigset, size int32)

//go:nosplit
//go:nowritebarrierrec
func sigprocmask(how int32, new, old *sigset) {
	rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
}

//go:noescape
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig uint32)
func raiseproc(sig uint32)

//go:noescape
func sched_getaffinity(pid, len uintptr, buf *byte) int32
func osyield()

//go:nosplit
//go:nowritebarrierrec
func setsig(i uint32, fn uintptr) {
	var sa sigactiont
	sa.sa_flags = _SA_SIGINFO | _SA_ONSTACK | _SA_RESTORER | _SA_RESTART
	sigfillset(&sa.sa_mask)
	// Although the Linux manpage says the sa_restorer element is obsolete
	// and should not be used, the x86-64 kernel requires it. Only use it
	// on x86.
	if GOARCH == "386" || GOARCH == "amd64" {
		sa.sa_restorer = funcPC(sigreturn)
	}
	if fn == funcPC(sighandler) {
		if iscgo {
			fn = funcPC(cgoSigtramp)
		} else {
			fn = funcPC(sigtramp)
		}
	}
	sa.sa_handler = fn
	rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask))
}

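// setsigstack ensures that the handler already installed for signal i, if
// any, is registered with the _SA_ONSTACK flag so that it runs on the
// alternate signal stack.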
//go:nosplit
//go:nowritebarrierrec
func setsigstack(i uint32) {
	var sa sigactiont
	rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask))
	if sa.sa_flags&_SA_ONSTACK != 0 {
		return
	}
	sa.sa_flags |= _SA_ONSTACK
	rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask))
}

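// getsig returns the current handler for signal i.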
//go:nosplit
//go:nowritebarrierrec
func getsig(i uint32) uintptr {
	var sa sigactiont
	if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
		throw("rt_sigaction read failure")
	}
	return sa.sa_handler
}

// setSignalstackSP sets the ss_sp field of a stackt.
//go:nosplit
func setSignalstackSP(s *stackt, sp uintptr) {
	*(*uintptr)(unsafe.Pointer(&s.ss_sp)) = sp
}

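// fixsigcode adjusts the signal code where the kernel's value is unhelpful;
// there is nothing to fix on Linux, so this is a no-op.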
func (c *sigctxt) fixsigcode(sig uint32) {
}