github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

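// traceback_cache holds the cached GOTRACEBACK setting in the encoding
// described above. For example (illustrative), the "crash" setting parsed in
// parsedebugvars below is cached as
//
//	2<<tracebackShift | tracebackAll | tracebackCrash
//
// that is, traceback level 2 with both the "all" and "crash" bits set.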
var traceback_cache uint32 = 2 << tracebackShift

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
		return
	}
	crash = traceback_cache&tracebackCrash != 0
	all = all || traceback_cache&tracebackAll != 0
	level = int32(traceback_cache >> tracebackShift)
	return
}

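// argc and argv are the raw C command-line arguments recorded by args at
// startup; goargs and goenvs_unix read them later.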
var (
	argc int32
	argv **byte
)

// argv_index returns argv[i].
// nosplit for use in linux/386 startup (linux_setup_vdso).
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*ptrSize))
}

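// args records the C argc/argv passed at program startup and then calls the
// OS-specific sysargs hook, which may do further platform-specific processing
// of them (see the linux/386 note above).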
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

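// goargs converts the saved C argv into the Go string slice argslice.
// On Windows the arguments are obtained by OS-specific code instead, so this
// returns early there.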
func goargs() {
	if GOOS == "windows" {
		return
	}

	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

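// goenvs_unix builds envs from the C environment block. On Unix systems the
// environment pointers follow argv: argv[argc] is nil, then come the
// environment strings, terminated by another nil pointer.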
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv.  Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

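// testAtomic64 sanity-checks the 64-bit atomic primitives (cas64,
// atomicload64, atomicstore64, xadd64, xchg64) at startup and throws if any
// of them misbehave.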
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomicload64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomicstore64(&test_z64, (1<<40)+1)
	if atomicload64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomicload64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomicload64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

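// check verifies basic compiler and runtime assumptions at startup: sizes of
// the primitive types, struct field offsets, timediv, the 32-bit and
// pointer-sized atomics, NaN comparison semantics, and the 64-bit atomics via
// testAtomic64. Any violation throws.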
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != ptrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != ptrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

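	// 12345*1000000000+54321 divided by 1000000000 is 12345 with remainder
	// 54321, so timediv must report exactly those values (remainder in e).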
	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if ptrSize == 8 {
		k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomicor8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// debug holds the variables parsed from the GODEBUG env var,
// except for "memprofilerate", since there is an existing
// int var for that value, which may already have an initial value.
var debug struct {
	allocfreetrace    int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}

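// dbgvars maps GODEBUG option names to the int32 settings above. For example
// (illustrative only), GODEBUG=gctrace=1,schedtrace=1000 would set
// debug.gctrace to 1 and debug.schedtrace to 1000 via the name/value matching
// in parsedebugvars below.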
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}

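// parsedebugvars initializes the debug settings from the GODEBUG and
// GOTRACEBACK environment variables during runtime startup.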
func parsedebugvars() {
	// defaults
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			MemProfileRate = atoi(value)
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					*v.value = int32(atoi(value))
				}
			}
		}
	}

	switch p := gogetenv("GOTRACEBACK"); p {
	case "none":
		traceback_cache = 0
	case "single", "":
		traceback_cache = 1 << tracebackShift
	case "all":
		traceback_cache = 1<<tracebackShift | tracebackAll
	case "system":
		traceback_cache = 2<<tracebackShift | tracebackAll
	case "crash":
		traceback_cache = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		traceback_cache = uint32(atoi(p))<<tracebackShift | tracebackAll
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		traceback_cache |= tracebackCrash
	}

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}
}

// Poor man's 64-bit division.
// This is a very special function; do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
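// For example, timediv(12345*1000000000+54321, 1000000000, &rem) returns 12345
// and sets *rem to 54321, which is exactly what check above verifies.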
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

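// acquirem pins the calling goroutine to its current m by incrementing
// m.locks; releasem drops that pin and, once the count reaches zero,
// re-arms any pending preemption request on the goroutine.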
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

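// reflect_typelinks is linknamed into the reflect package. It returns the
// typelinks tables of every loaded module, starting with firstmoduledata.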
//go:linkname reflect_typelinks reflect.typelinks
//go:nosplit
func reflect_typelinks() [][]*_type {
	ret := [][]*_type{firstmoduledata.typelinks}
	for datap := firstmoduledata.next; datap != nil; datap = datap.next {
		ret = append(ret, datap.typelinks)
	}
	return ret
}