github.com/sanprasirt/go@v0.0.0-20170607001320-a027466e4b6d/src/runtime/runtime1.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	_g_ := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = _g_.m.throwing > 0 || t&tracebackAll != 0
	if _g_.m.traceback != 0 {
		level = int32(_g_.m.traceback)
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}
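// decodeTraceback is an editorial sketch, not part of the original file: it
// shows how the packed traceback word above splits into its three fields,
// mirroring the decoding in gotraceback without consulting the current M.
func decodeTraceback(t uint32) (level int32, all, crash bool) {
	crash = t&tracebackCrash != 0      // bit 0: crash after traceback
	all = t&tracebackAll != 0          // bit 1: dump all goroutines
	level = int32(t >> tracebackShift) // remaining bits: traceback level
	return
}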
    48  
var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}
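// Editorial note, not part of the original file: argv_index(argv, i) is the
// Go spelling of C's argv[i] -- it advances the base pointer by i machine
// words and loads the *byte stored there. For example:
//
//	name := gostringnocopy(argv_index(argv, 0)) // argv[0], the program name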
    75  
func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}
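// Editorial sketch, not part of the original file: goenvs_unix assumes the
// conventional Unix process image, where the environment block immediately
// follows the NULL pointer that terminates argv:
//
//	argv[0] ... argv[argc-1] NULL env[0] ... env[n-1] NULL
//
// which is why the environment is read starting at index argc+1.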
    90  
func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
	prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
	prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
	prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}
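// casLoop is an editorial sketch, not part of the original file: it shows the
// usual retry pattern built on the Cas64 primitive exercised above -- reread
// the current value until the compare-and-swap succeeds, then return the new
// value.
func casLoop(addr *uint64, delta uint64) uint64 {
	for {
		old := atomic.Load64(addr)
		if atomic.Cas64(addr, old, old+delta) {
			return old + delta
		}
	}
}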
   139  
func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k, k1 unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != sys.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != sys.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	k = unsafe.Pointer(uintptr(0xfedcb123))
	if sys.PtrSize == 8 {
		k = unsafe.Pointer(uintptr(k) << 10)
	}
	if casp(&k, nil, nil) {
		throw("casp1")
	}
	k1 = add(k, 1)
	if !casp(&k, k, k1) {
		throw("casp2")
	}
	if k != k1 {
		throw("casp3")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if _FixedStack != round2(_FixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}
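// Editorial note, not part of the original file: the NaN checks above work by
// writing an all-ones (or nearly all-ones) bit pattern into a float through
// an unsafe pointer, which yields a NaN, and NaN is the only value that
// compares unequal to itself. Outside the runtime the same reinterpretation
// is spelled with the standard library:
//
//	nan := math.Float64frombits(^uint64(0)) // a NaN
//	fmt.Println(nan == nan)                 // false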
   310  
type dbgVar struct {
	name  string
	value *int32
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	allocfreetrace   int32
	cgocheck         int32
	efence           int32
	gccheckmark      int32
	gcpacertrace     int32
	gcshrinkstackoff int32
	gcrescanstacks   int32
	gcstoptheworld   int32
	gctrace          int32
	invalidptr       int32
	sbrk             int32
	scavenge         int32
	scheddetail      int32
	schedtrace       int32
}

var dbgvars = []dbgVar{
	{"allocfreetrace", &debug.allocfreetrace},
	{"cgocheck", &debug.cgocheck},
	{"efence", &debug.efence},
	{"gccheckmark", &debug.gccheckmark},
	{"gcpacertrace", &debug.gcpacertrace},
	{"gcshrinkstackoff", &debug.gcshrinkstackoff},
	{"gcrescanstacks", &debug.gcrescanstacks},
	{"gcstoptheworld", &debug.gcstoptheworld},
	{"gctrace", &debug.gctrace},
	{"invalidptr", &debug.invalidptr},
	{"sbrk", &debug.sbrk},
	{"scavenge", &debug.scavenge},
	{"scheddetail", &debug.scheddetail},
	{"schedtrace", &debug.schedtrace},
}
   353  
func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1

	for p := gogetenv("GODEBUG"); p != ""; {
		field := ""
		i := index(p, ",")
		if i < 0 {
			field, p = p, ""
		} else {
			field, p = p[:i], p[i+1:]
		}
		i = index(field, "=")
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						*v.value = n
					}
				}
			}
		}
	}

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache

	// For cgocheck > 1, we turn on the write barrier at all times
	// and check all pointer writes.
	if debug.cgocheck > 1 {
		writeBarrier.cgo = true
		writeBarrier.enabled = true
	}
}
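// Editorial worked example, not part of the original file: GODEBUG is a
// comma-separated list of key=value pairs, so starting a program with
//
//	GODEBUG=gctrace=1,schedtrace=1000 ./prog
//
// makes the loop above set debug.gctrace = 1 and debug.schedtrace = 1000;
// fields without an '=' are silently skipped.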
   401  
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting the process on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}
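// Editorial worked example, not part of the original file: with
// tracebackCrash == 1, tracebackAll == 2, and tracebackShift == 2, the level
// "crash" packs to 2<<2 | 2 | 1 = 0xb, which gotraceback later decodes as
// level=2, all=true, crash=true.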
   432  
// Poor man's 64-bit division.
// This is a very special function; do not use it unless you are sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}
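// Editorial worked example, not part of the original file: timediv is binary
// long division over bits 30..0, since the quotient must fit in an int32. For
// timediv(7, 2, &r): bit 1 subtracts 2<<1 leaving 3, bit 0 subtracts 2<<0
// leaving 1, so it returns 0b11 = 3 with r = 1. If the quotient would
// overflow, it returns 0x7fffffff with r = 0.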
   457  
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.

//go:nosplit
func acquirem() *m {
	_g_ := getg()
	_g_.m.locks++
	return _g_.m
}

//go:nosplit
func releasem(mp *m) {
	_g_ := getg()
	mp.locks--
	if mp.locks == 0 && _g_.preempt {
		// restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}
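// Editorial sketch, not part of the original file: acquirem and releasem are
// used in pairs to pin the current goroutine to its M for a short critical
// section (a nonzero m.locks disables preemption):
//
//	mp := acquirem()
//	// ... work that must not be preempted or migrate off this M ...
//	releasem(mp)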
   476  
//go:nosplit
func gomcache() *mcache {
	return getg().m.mcache
}

//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}
   493  
// reflect_resolveNameOff resolves a name offset from a base pointer.
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer((*_type)(rtype).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return (*_type)(rtype).textOff(textOff(off))
}
   512  
// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}
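// Editorial note, not part of the original file: reflect_addReflectOff hands
// out negative IDs (-1, -2, ...) -- per the comment above, negative values
// make runtime-created offsets easy to tell apart when debugging -- and the
// minv reverse map ensures that registering the same pointer twice returns
// the same ID.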