github.com/hikaru7719/go@v0.0.0-20181025140707-c8b2ac68906a/src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
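// Illustrative note (not in the original source): argv is treated as a C
// array of pointers, so on a 64-bit system (sys.PtrSize == 8)
// argv_index(argv, 2) loads the *byte stored at address argv+16, i.e. the
// third NUL-terminated argument string.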

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
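// Layout assumed by goenvs_unix above (standard Unix process startup; shown
// here for illustration, not part of the original source):
//
//	argv[0] ... argv[argc-1]  NULL  env[0] ... env[n-1]  NULL
//
// The first environment string therefore sits at index argc+1, and the scan
// stops at the terminating nil pointer.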

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
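// Semantics exercised above (noted here for illustration): Cas64(addr, old, new)
// atomically performs
//
//	if *addr == old { *addr = new; return true }; return false
//
// so the first Cas64 call must fail (test_z64 is 42, not 0) and the second
// must succeed, and the (1<<40)-scale constants verify that the full 64 bits
// travel through each primitive.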

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

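	// Illustration (comment added, not in the original): ^uint64(0) is the
	// all-ones bit pattern, whose exponent field is all ones and whose
	// mantissa is nonzero, i.e. an IEEE 754 NaN. A NaN compares unequal to
	// everything, including itself, which is what the checks below verify.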
	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace     int32
	cgocheck           int32
	efence             int32
	gccheckmark        int32
	gcpacertrace       int32
	gcshrinkstackoff   int32
	gcstoptheworld     int32
	gctrace            int32
	invalidptr         int32
	sbrk               int32
	scavenge           int32
	scheddetail        int32
	schedtrace         int32
	tracebackancestors int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"tracebackancestors", &debug.tracebackancestors},
}

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}
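// Example (illustrative, not part of the original source): with
// GODEBUG=gctrace=1,schedtrace=1000 the loop above sets debug.gctrace = 1
// and debug.schedtrace = 1000; a field without '=' or with an unknown key
// is silently ignored.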

//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
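		// Comment added for illustration: a numeric GOTRACEBACK such as
		// "2" is stored directly as the level, provided the value
		// round-trips through uint32 unchanged.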
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure of what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
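// Worked example (illustrative, not part of the original source):
// timediv(100, 7, &r) subtracts 7<<3 == 56 (setting bit 3), then
// 7<<2 == 28 (bit 2), then 7<<1 == 14 (bit 1), leaving v == 2 < 7, so it
// returns 8+4+2 == 14 with r == 2, i.e. 100/7. If the quotient would need
// bit 31 or higher, v is still >= div after the loop and the result
// saturates to 0x7fffffff.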

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
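// Typical pairing (shown for illustration, not part of the original source):
// runtime code that must keep the goroutine bound to its current M and
// non-preemptible brackets the critical region like so:
//
//	mp := acquirem()
//	// ... code that must not be preempted or migrated ...
//	releasem(mp)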

//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
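// Illustrative behavior (comment added, not in the original source): the
// first pointer registered receives id -1, the next -2, and so on;
// registering the same pointer again returns its existing id rather than
// allocating a new one.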