github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/runtime1.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  // Keep a cached value to make gotraceback fast,
    14  // since we call it on every call to gentraceback.
    15  // The cached value is a uint32 in which the low bits
    16  // are the "crash" and "all" settings and the remaining
    17  // bits are the traceback value (0 off, 1 on, 2 include system).
const (
	// tracebackCrash and tracebackAll are the flag bits stored in the
	// low bits of traceback_cache; the traceback level occupies the
	// bits above them.
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota // number of low bits used by the flags above
)
    23  
// traceback_cache holds the packed traceback setting (flag bits in the
// low tracebackShift bits, level above them). The initial value decodes
// to level 2 until parsedebugvars installs the GOTRACEBACK value.
var traceback_cache uint32 = 2 << tracebackShift

// traceback_env records the bits that came from the environment
// (captured in parsedebugvars); setTraceback always ORs these back in,
// so later runtime/debug.SetTraceback calls cannot drop them.
var traceback_env uint32
    26  
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	// While this M is throwing, always dump all goroutines.
	all = _g_.m.throwing > 0
	if _g_.m.traceback != 0 {
		// A per-M override takes precedence over the cached global
		// setting; note crash stays false in this case.
		level = int32(_g_.m.traceback)
		return
	}
	// Decode the packed global setting (see traceback_cache above).
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = all || t&tracebackAll != 0
	level = int32(t >> tracebackShift)
	return
}
    49  
// argc and argv are the raw process arguments, recorded by args at
// startup and consumed by goargs and goenvs_unix.
var (
	argc int32
	argv **byte
)
    54  
// argv_index returns the i'th pointer in the argv vector.
//
// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
    60  
// args records the process argument vector and lets the OS-specific
// sysargs hook extract any additional startup information from it.
func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}
    66  
    67  func goargs() {
    68  	if GOOS == "windows" {
    69  		return
    70  	}
    71  	argslice = make([]string, argc)
    72  	for i := int32(0); i < argc; i++ {
    73  		argslice[i] = gostringnocopy(argv_index(argv, i))
    74  	}
    75  }
    76  
    77  func goenvs_unix() {
    78  	// TODO(austin): ppc64 in dynamic linking mode doesn't
    79  	// guarantee env[] will immediately follow argv. Might cause
    80  	// problems.
    81  	n := int32(0)
    82  	for argv_index(argv, argc+1+n) != nil {
    83  		n++
    84  	}
    85  
    86  	envs = make([]string, n)
    87  	for i := int32(0); i < n; i++ {
    88  		envs[i] = gostring(argv_index(argv, argc+1+i))
    89  	}
    90  }
    91  
// environ returns the environment strings captured at startup.
func environ() []string {
	return envs
}
    95  
// Scratch words used by testAtomic64's 64-bit atomic self-checks.
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
    99  
// testAtomic64 sanity-checks the 64-bit atomic operations (Cas64,
// Load64, Store64, Xadd64, Xchg64) against known values, throwing on
// any mismatch. Called from check at startup.
func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	// Touch the test word with every prefetch variant; this also
	// exercises the prefetch stubs themselves.
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	// Cas64 with a non-matching old value (0 vs 42) must fail.
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	// Cas64 with the matching old value (42) must succeed and store 1.
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	// Use values wider than 32 bits to catch implementations that
	// drop the high word.
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
   140  
   141  func check() {
   142  	var (
   143  		a     int8
   144  		b     uint8
   145  		c     int16
   146  		d     uint16
   147  		e     int32
   148  		f     uint32
   149  		g     int64
   150  		h     uint64
   151  		i, i1 float32
   152  		j, j1 float64
   153  		k, k1 unsafe.Pointer
   154  		l     *uint16
   155  		m     [4]byte
   156  	)
   157  	type x1t struct {
   158  		x uint8
   159  	}
   160  	type y1t struct {
   161  		x1 x1t
   162  		y  uint8
   163  	}
   164  	var x1 x1t
   165  	var y1 y1t
   166  
   167  	if unsafe.Sizeof(a) != 1 {
   168  		throw("bad a")
   169  	}
   170  	if unsafe.Sizeof(b) != 1 {
   171  		throw("bad b")
   172  	}
   173  	if unsafe.Sizeof(c) != 2 {
   174  		throw("bad c")
   175  	}
   176  	if unsafe.Sizeof(d) != 2 {
   177  		throw("bad d")
   178  	}
   179  	if unsafe.Sizeof(e) != 4 {
   180  		throw("bad e")
   181  	}
   182  	if unsafe.Sizeof(f) != 4 {
   183  		throw("bad f")
   184  	}
   185  	if unsafe.Sizeof(g) != 8 {
   186  		throw("bad g")
   187  	}
   188  	if unsafe.Sizeof(h) != 8 {
   189  		throw("bad h")
   190  	}
   191  	if unsafe.Sizeof(i) != 4 {
   192  		throw("bad i")
   193  	}
   194  	if unsafe.Sizeof(j) != 8 {
   195  		throw("bad j")
   196  	}
   197  	if unsafe.Sizeof(k) != sys.PtrSize {
   198  		throw("bad k")
   199  	}
   200  	if unsafe.Sizeof(l) != sys.PtrSize {
   201  		throw("bad l")
   202  	}
   203  	if unsafe.Sizeof(x1) != 1 {
   204  		throw("bad unsafe.Sizeof x1")
   205  	}
   206  	if unsafe.Offsetof(y1.y) != 1 {
   207  		throw("bad offsetof y1.y")
   208  	}
   209  	if unsafe.Sizeof(y1) != 2 {
   210  		throw("bad unsafe.Sizeof y1")
   211  	}
   212  
   213  	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
   214  		throw("bad timediv")
   215  	}
   216  
   217  	var z uint32
   218  	z = 1
   219  	if !atomic.Cas(&z, 1, 2) {
   220  		throw("cas1")
   221  	}
   222  	if z != 2 {
   223  		throw("cas2")
   224  	}
   225  
   226  	z = 4
   227  	if atomic.Cas(&z, 5, 6) {
   228  		throw("cas3")
   229  	}
   230  	if z != 4 {
   231  		throw("cas4")
   232  	}
   233  
   234  	z = 0xffffffff
   235  	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
   236  		throw("cas5")
   237  	}
   238  	if z != 0xfffffffe {
   239  		throw("cas6")
   240  	}
   241  
   242  	k = unsafe.Pointer(uintptr(0xfedcb123))
   243  	if sys.PtrSize == 8 {
   244  		k = unsafe.Pointer(uintptr(k) << 10)
   245  	}
   246  	if casp(&k, nil, nil) {
   247  		throw("casp1")
   248  	}
   249  	k1 = add(k, 1)
   250  	if !casp(&k, k, k1) {
   251  		throw("casp2")
   252  	}
   253  	if k != k1 {
   254  		throw("casp3")
   255  	}
   256  
   257  	m = [4]byte{1, 1, 1, 1}
   258  	atomic.Or8(&m[1], 0xf0)
   259  	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
   260  		throw("atomicor8")
   261  	}
   262  
   263  	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
   264  	if j == j {
   265  		throw("float64nan")
   266  	}
   267  	if !(j != j) {
   268  		throw("float64nan1")
   269  	}
   270  
   271  	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
   272  	if j == j1 {
   273  		throw("float64nan2")
   274  	}
   275  	if !(j != j1) {
   276  		throw("float64nan3")
   277  	}
   278  
   279  	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
   280  	if i == i {
   281  		throw("float32nan")
   282  	}
   283  	if i == i {
   284  		throw("float32nan1")
   285  	}
   286  
   287  	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
   288  	if i == i1 {
   289  		throw("float32nan2")
   290  	}
   291  	if i == i1 {
   292  		throw("float32nan3")
   293  	}
   294  
   295  	testAtomic64()
   296  
   297  	if _FixedStack != round2(_FixedStack) {
   298  		throw("FixedStack is not power-of-2")
   299  	}
   300  
   301  	if !checkASM() {
   302  		throw("assembly checks failed")
   303  	}
   304  }
   305  
// dbgVar associates a GODEBUG key name with the int32 variable that
// receives its parsed value.
type dbgVar struct {
	name  string
	value *int32
}
   310  
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
//
// Each field is wired to its GODEBUG key via dbgvars and filled in by
// parsedebugvars.
var debug struct {
	allocfreetrace    int32
	cgocheck          int32
	efence            int32
	gccheckmark       int32
	gcpacertrace      int32
	gcshrinkstackoff  int32
	gcstackbarrieroff int32
	gcstackbarrierall int32
	gcrescanstacks    int32
	gcstoptheworld    int32
	gctrace           int32
	invalidptr        int32
	sbrk              int32
	scavenge          int32
	scheddetail       int32
	schedtrace        int32
	wbshadow          int32
}
   334  
// dbgvars maps each GODEBUG key to the corresponding field of debug.
var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &debug.gcstackbarrieroff},
	{"gcstackbarrierall", &debug.gcstackbarrierall},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
	{"wbshadow", &debug.wbshadow},
}
   354  
// parsedebugvars initializes the debug struct from the GODEBUG
// environment variable, applies the GOTRACEBACK setting, and derives
// the dependent runtime switches (stack barriers, cgo write barrier).
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	// GODEBUG is a comma-separated list of key=value pairs.
	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			// Entry without '='; skip it.
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	// Snapshot the environment-derived traceback bits so later calls
	// to setTraceback cannot drop them (it ORs traceback_env back in).
	traceback_env = traceback_cache

	if debug.gcrescanstacks == 0 {
		// Without rescanning, there's no need for stack
		// barriers.
		debug.gcstackbarrieroff = 1
		debug.gcstackbarrierall = 0
	}

	if debug.gcstackbarrierall > 0 {
		firstStackBarrierOffset = 0
	}

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
   413  
// setTraceback parses a GOTRACEBACK-style level string ("none",
// "single", "all", "system", "crash", or a decimal number) and stores
// the packed result in traceback_cache. Linked into
// runtime/debug.SetTraceback.
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		// Accept a numeric level if it fits in 32 bits.
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// when C owns the process, simply exit'ing the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	// Never drop settings that came from the environment
	// (see parsedebugvars).
	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
   444  
   445  // Poor mans 64-bit division.
   446  // This is a very special function, do not use it if you are not sure what you are doing.
   447  // int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
   448  // Handles overflow in a time-specific manner.
   449  //go:nosplit
   450  func timediv(v int64, div int32, rem *int32) int32 {
   451  	res := int32(0)
   452  	for bit := 30; bit >= 0; bit-- {
   453  		if v >= int64(div)<<uint(bit) {
   454  			v = v - (int64(div) << uint(bit))
   455  			res += 1 << uint(bit)
   456  		}
   457  	}
   458  	if v >= int64(div) {
   459  		if rem != nil {
   460  			*rem = 0
   461  		}
   462  		return 0x7fffffff
   463  	}
   464  	if rem != nil {
   465  		*rem = int32(v)
   466  	}
   467  	return res
   468  }
   469  
   470  // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
   471  
// acquirem increments the current M's lock count and returns that M;
// must be paired with a later releasem.
//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}
   478  
// releasem undoes acquirem: it decrements mp.locks, and when the count
// drops to zero it re-arms any pending preemption request for the
// current goroutine.
//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
   488  
// gomcache returns the current M's mcache.
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}
   493  
   494  //go:linkname reflect_typelinks reflect.typelinks
   495  func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
   496  	modules := activeModules()
   497  	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
   498  	ret := [][]int32{modules[0].typelinks}
   499  	for _, md := range modules[1:] {
   500  		sections = append(sections, unsafe.Pointer(md.types))
   501  		ret = append(ret, md.typelinks)
   502  	}
   503  	return sections, ret
   504  }
   505  
// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}
   511  
// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}
   517  
   518  // reflect_resolveTextOff resolves an function pointer offset from a base type.
   519  //go:linkname reflect_resolveTextOff reflect.resolveTextOff
   520  func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
   521  	return (*_type)(rtype).textOff(textOff(off))
   522  
   523  }
   524  
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
// It returns the (negative) ID assigned to ptr, reusing the existing ID
// if ptr was already registered.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		// First use: lazily allocate the forward and inverse maps.
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}