github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/runtime1.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota // bit 0: abort (core dump) after printing the traceback
	tracebackAll               // bit 1: print all goroutine stacks, not just the current one
	tracebackShift = iota      // number of flag bits below the packed level value
)

// traceback_cache holds the packed GOTRACEBACK setting.
// Starts at level 2 (include system frames) so that very early crashes,
// before parsedebugvars installs the real setting, show everything.
var traceback_cache uint32 = 2 << tracebackShift
    25  
    26  // gotraceback returns the current traceback settings.
    27  //
    28  // If level is 0, suppress all tracebacks.
    29  // If level is 1, show tracebacks, but exclude runtime frames.
    30  // If level is 2, show tracebacks including runtime frames.
    31  // If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
    32  // If crash is set, crash (core dump, etc) after tracebacking.
    33  //
    34  //go:nosplit
    35  func gotraceback() (level int32, all, crash bool) {
    36  	_g_ := getg()
    37  	all = _g_.m.throwing > 0
    38  	if _g_.m.traceback != 0 {
    39  		level = int32(_g_.m.traceback)
    40  		return
    41  	}
    42  	crash = traceback_cache&tracebackCrash != 0
    43  	all = all || traceback_cache&tracebackAll != 0
    44  	level = int32(traceback_cache >> tracebackShift)
    45  	return
    46  }
    47  
// Raw process arguments as handed over by the OS; recorded by args and
// consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
    52  
    53  // nosplit for use in linux/386 startup linux_setup_vdso
    54  //go:nosplit
    55  func argv_index(argv **byte, i int32) *byte {
    56  	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
    57  }
    58  
// args records the C-level argc/argv passed at process startup and then
// gives the OS-specific sysargs hook a chance to read anything else it
// needs from them.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
    64  
    65  func goargs() {
    66  	if GOOS == "windows" {
    67  		return
    68  	}
    69  
    70  	argslice = make([]string, argc)
    71  	for i := int32(0); i < argc; i++ {
    72  		argslice[i] = gostringnocopy(argv_index(argv, i))
    73  	}
    74  }
    75  
    76  func goenvs_unix() {
    77  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    78  	// guarantee env[] will immediately follow argv.  Might cause
    79  	// problems.
    80  	n := int32(0)
    81  	for argv_index(argv, argc+1+n) != nil {
    82  		n++
    83  	}
    84  
    85  	envs = make([]string, n)
    86  	for i := int32(0); i < n; i++ {
    87  		envs[i] = gostring(argv_index(argv, argc+1+i))
    88  	}
    89  }
    90  
// environ returns the runtime's cached copy of the environment
// (built by goenvs_unix on unix-like systems).
func environ() []string {
	return envs
}
    94  
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64 // scratch operands for testAtomic64
    98  
// testAtomic64 sanity-checks the 64-bit atomic operations (Cas64,
// Load64, Store64, Xadd64, Xchg64) on package-level 8-byte-aligned
// variables, using values above 1<<32 to catch 32-bit truncation.
// Throws on any mismatch. Called from check at startup.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Exercise each prefetch hint stub on the address under test.
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	// CAS with a non-matching old value (0 vs 42) must fail...
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	// ...and must leave the comparand untouched.
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	// CAS with the matching old value (42) must succeed.
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Values above 1<<40 verify the high word survives each operation.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
   139  
// check verifies at startup that the compiler and runtime agree on
// basic type sizes and layout, that the atomic primitives work, that
// NaN comparison behaves correctly, and that a few helper routines
// (timediv, casp, round2, assembly stubs) produce expected results.
// Any mismatch is fatal.
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	// Fixed-size integer, float, and pointer types must have their
	// advertised sizes.
	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	// Struct layout: a one-byte struct, and two single-byte fields with
	// no padding between them.
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	// Spot-check the hand-rolled 64-by-32 division helper.
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	// 32-bit compare-and-swap: success, failure, and all-ones values.
	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	// Pointer compare-and-swap; widen the test pattern on 64-bit.
	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	// Or8 must modify only the addressed byte, not its neighbors.
	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	// An all-ones bit pattern is a NaN; NaN must compare unequal to
	// itself under both == and !=.
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	// NOTE(review): unlike the float64 checks above, this repeats the ==
	// comparison rather than testing !(i != i); presumably it was meant
	// to mirror float64nan1's != form — confirm against upstream intent.
	if i == i {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if i == i1 {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
   304  
// dbgVar pairs a GODEBUG key with a pointer to the int32 it controls.
type dbgVar struct {
	name  string
	value *int32
}
   309  
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// All fields default to 0 except cgocheck and invalidptr, which
// parsedebugvars sets to 1 before reading GODEBUG.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}
   332  
// dbgvars maps each recognized GODEBUG key to the debug field it sets;
// parsedebugvars scans this table for every key=value pair it parses.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}
   351  
   352  func parsedebugvars() {
   353  	// defaults
   354  	debug.cgocheck = 1
   355  	debug.invalidptr = 1
   356  
   357  	for p := gogetenv("GODEBUG"); p != ""; {
   358  		field := ""
   359  		i := index(p, ",")
   360  		if i < 0 {
   361  			field, p = p, ""
   362  		} else {
   363  			field, p = p[:i], p[i+1:]
   364  		}
   365  		i = index(field, "=")
   366  		if i < 0 {
   367  			continue
   368  		}
   369  		key, value := field[:i], field[i+1:]
   370  
   371  		// Update MemProfileRate directly here since it
   372  		// is int, not int32, and should only be updated
   373  		// if specified in GODEBUG.
   374  		if key == "memprofilerate" {
   375  			MemProfileRate = atoi(value)
   376  		} else {
   377  			for _, v := range dbgvars {
   378  				if v.name == key {
   379  					*v.value = int32(atoi(value))
   380  				}
   381  			}
   382  		}
   383  	}
   384  
   385  	switch p := gogetenv("GOTRACEBACK"); p {
   386  	case "none":
   387  		traceback_cache = 0
   388  	case "single", "":
   389  		traceback_cache = 1 << tracebackShift
   390  	case "all":
   391  		traceback_cache = 1<<tracebackShift | tracebackAll
   392  	case "system":
   393  		traceback_cache = 2<<tracebackShift | tracebackAll
   394  	case "crash":
   395  		traceback_cache = 2<<tracebackShift | tracebackAll | tracebackCrash
   396  	default:
   397  		traceback_cache = uint32(atoi(p))<<tracebackShift | tracebackAll
   398  	}
   399  	// when C owns the process, simply exit'ing the process on fatal errors
   400  	// and panics is surprising. Be louder and abort instead.
   401  	if islibrary || isarchive {
   402  		traceback_cache |= tracebackCrash
   403  	}
   404  
   405  	if debug.gcstackbarrierall > 0 {
   406  		firstStackBarrierOffset = 0
   407  	}
   408  
   409  	// For cgocheck > 1, we turn on the write barrier at all times
   410  	// and check all pointer writes.
   411  	if debug.cgocheck > 1 {
   412  		writeBarrier.cgo = true
   413  		writeBarrier.enabled = true
   414  	}
   415  }
   416  
   417  // Poor mans 64-bit division.
   418  // This is a very special function, do not use it if you are not sure what you are doing.
   419  // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
   420  // Handles overflow in a time-specific manner.
   421  //go:nosplit
   422  func timediv(v int64, div int32, rem *int32) int32 {
   423  	res := int32(0)
   424  	for bit := 30; bit >= 0; bit-- {
   425  		if v >= int64(div)<<uint(bit) {
   426  			v = v - (int64(div) << uint(bit))
   427  			res += 1 << uint(bit)
   428  		}
   429  	}
   430  	if v >= int64(div) {
   431  		if rem != nil {
   432  			*rem = 0
   433  		}
   434  		return 0x7fffffff
   435  	}
   436  	if rem != nil {
   437  		*rem = int32(v)
   438  	}
   439  	return res
   440  }
   441  
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

// acquirem pins the calling goroutine to its current m by incrementing
// the m's lock count, and returns that m. Pair with releasem.
//
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}
   450  
// releasem undoes acquirem. If this drops the lock count to zero while
// a preemption request is pending, it re-arms the stack-guard check so
// the preemption is delivered promptly.
//
//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
   460  
// gomcache returns the mcache of the current g's m.
//
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}
   465  
   466  //go:linkname reflect_typelinks reflect.typelinks
   467  func reflect_typelinks() [][]*_type {
   468  	ret := [][]*_type{firstmoduledata.typelinks}
   469  	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
   470  		ret = append(ret, datap.typelinks)
   471  	}
   472  	return ret
   473  }