github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)
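
// With this encoding, tracebackCrash == 1, tracebackAll == 2, and
// tracebackShift == 2, so the default cache value below, 2<<tracebackShift,
// decodes to level 2 with all and crash both unset.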

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
		return
	}
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
	return
}
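
// For example, with GOTRACEBACK=single the cache holds 1<<tracebackShift,
// so gotraceback returns (1, false, false) in normal operation but
// (1, true, false) while the M is throwing; a nonzero per-M traceback
// setting overrides the cached level entirely.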

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}

	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
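
// On Unix the startup vector is laid out contiguously as
//
//	argv[0] ... argv[argc-1] | nil | env[0] ... env[n-1] | nil
//
// so the environment is found by indexing from argc+1 until the
// terminating nil pointer, as above (the ppc64 dynamic-linking case
// noted in the TODO is the known exception).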

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
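
// The checks above rely on the Cas64 contract: Cas64(p, old, new)
// atomically stores new and reports true only if *p == old. The first
// Cas64 must fail (test_z64 is 42, not 0); the second must succeed and
// leave test_z64 == 1. Values like (1<<40)+1 exercise bits above the
// low 32, catching ports that implement the 64-bit atomics with 32-bit
// operations.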

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from the GODEBUG environment variable,
// except for "memprofilerate", which has an existing int variable
// (MemProfileRate) that may already hold an initial value.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			MemProfileRate = atoi(value)
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					*v.value = int32(atoi(value))
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
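
// For example, GODEBUG=gctrace=1,schedtrace=1000 sets debug.gctrace to 1
// and debug.schedtrace to 1000. A field without '=' is skipped, and keys
// that match no entry in dbgvars are silently ignored.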

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = uint32(atoi(level))<<tracebackShift | tracebackAll
	}
	// When C owns the process, simply exiting on fatal errors and panics
	// is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
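
// For example, "all" packs to 1<<tracebackShift | tracebackAll == 6
// (level 1, all goroutines), and "crash" packs to
// 2<<tracebackShift | tracebackAll | tracebackCrash == 11.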

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure you
// know what you are doing. int64 division is lowered into a _divv() call
// on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
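
// A worked example: timediv(100, 7, &rem) subtracts 7<<3 == 56
// (res += 8), then 7<<2 == 28 (res += 4), then 7<<1 == 14 (res += 2),
// leaving v == 2 < 7, so it returns 14 with rem == 2. If the true
// quotient does not fit in the 31 bits the loop can produce, the
// leftover v is still >= div and the function saturates to 0x7fffffff
// with rem == 0.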

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
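
// A typical (illustrative) pairing:
//
//	mp := acquirem() // pin the goroutine to its M, disabling preemption
//	// ... work that must not be preempted or migrate between Ms ...
//	releasem(mp)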

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	sections := []unsafe.Pointer{unsafe.Pointer(firstmoduledata.types)}
	ret := [][]int32{firstmoduledata.typelinks}
	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
		sections = append(sections, unsafe.Pointer(datap.types))
		ret = append(ret, datap.typelinks)
	}
	return sections, ret
}
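
// With a single module (the common case), the result is one types-section
// base pointer and one typelinks slice; each additional module in the
// program contributes one more entry to each slice.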

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
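
// The first pointer registered receives id -1, the next -2, and so on;
// the minv map ensures that registering the same pointer again returns
// its original id.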