github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/runtime/stubs.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// Declarations for runtime services implemented in C or assembly.

const ptrSize = 4 << (^uintptr(0) >> 63)             // unsafe.Sizeof(uintptr(0)) but an ideal const
const regSize = 4 << (^uintreg(0) >> 63)             // unsafe.Sizeof(uintreg(0)) but an ideal const
const spAlign = 1*(1-goarch_arm64) + 16*goarch_arm64 // SP alignment: 1 normally, 16 for ARM64
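// For example, on a 64-bit system ^uintptr(0) is all ones, so
// ^uintptr(0)>>63 == 1 and ptrSize == 4<<1 == 8; on a 32-bit system
// the shift yields 0 and ptrSize == 4<<0 == 4.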

// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
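
// For illustration, a typical use is indexing raw memory, e.g. element
// i of a block whose element type has size elemsize (names here are
// illustrative, not from this file):
//
//	p := add(base, i*elemsize)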

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g
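
// For illustration, a common pattern in the runtime is to check
// whether the caller is already on the system stack:
//
//	gp := getg()
//	if gp == gp.m.g0 {
//		// already on the system stack
//	}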

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g.sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))
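
// For illustration, a park-style use (a sketch, not an exact call site):
//
//	mcall(func(gp *g) {
//		// gp is the goroutine that called mcall. Record it so that
//		// something can ready(gp) later, then run the scheduler on
//		// this M instead of returning.
//		schedule()
//	})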

// systemstack runs fn on a system stack.
// If systemstack is called from the per-OS-thread (g0) stack, or
// if systemstack is called from the signal handling (gsignal) stack,
// systemstack calls fn directly and returns.
// Otherwise, systemstack is being called from the limited stack
// of an ordinary goroutine. In this case, systemstack switches
// to the per-OS-thread stack, calls fn, and switches back.
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
    52  //
    53  //	... set up y ...
    54  //	systemstack(func() {
    55  //		x = bigcall(y)
    56  //	})
    57  //	... use x ...
    58  //
    59  //go:noescape
    60  func systemstack(fn func())
    61  
    62  func badsystemstack() {
    63  	throw("systemstack called from unexpected goroutine")
    64  }
    65  
    66  // memclr clears n bytes starting at ptr.
    67  // in memclr_*.s
    68  //go:noescape
    69  func memclr(ptr unsafe.Pointer, n uintptr)
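
// For illustration, zeroing a local value x (a sketch; real callers
// must keep the GC in mind when clearing pointer-bearing memory):
//
//	memclr(unsafe.Pointer(&x), unsafe.Sizeof(x))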

//go:linkname reflect_memclr reflect.memclr
func reflect_memclr(ptr unsafe.Pointer, n uintptr) {
	memclr(ptr, n)
}

// memmove copies n bytes from "from" to "to".
// in memmove_*.s
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}
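
// For illustration, copying size bytes between two byte slices (the
// regions may overlap; like C's memmove, overlap is handled correctly):
//
//	memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), size)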

// exported value for testing
var hashLoad = loadFactor

// in asm_*.s
func fastrand1() uint32

// in asm_*.s
//go:noescape
func memeq(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis.  noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input.  noescape is inlined and currently
// compiles down to a single xor instruction.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
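
// For illustration, a sketch of the usual pattern: pass a stack buffer
// to a routine known not to retain it, without forcing the buffer to
// the heap (readInto is a hypothetical callee):
//
//	var buf [64]byte
//	p := noescape(unsafe.Pointer(&buf[0]))
//	readInto(p, len(buf))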

func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
func gogo(buf *gobuf)
func gosave(buf *gobuf)
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func asminit()
func setg(gg *g)
func breakpoint()

// reflectcall calls fn with a copy of the argsize argument bytes pointed at by arg.
// After fn returns, reflectcall copies argsize-retoffset result bytes
// back into arg+retoffset before returning. If copying result bytes back,
// the caller should pass the argument frame type as argtype, so that
// reflectcall can execute appropriate write barriers during the copy.
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in cgocallbackg1, and it does NOT pass a
// frame type, meaning there are no write barriers invoked. See that call
// site for justification.
func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there are leftover stack barriers it may panic.
func goexit(neverCallThisFunction)

// Not all cgocallback_gofunc frames are actually cgocallback_gofunc,
// so not all have these arguments. Mark them uintptr so that the GC
// does not misinterpret memory when the arguments are not present.
// cgocallback_gofunc is not called from Go, only from cgocallback,
// so the arguments will be found via cgocallback's pointer-declared arguments.
// See the assembly implementations for more details.
func cgocallback_gofunc(fv uintptr, frame uintptr, framesize uintptr)

//go:noescape
func cas(ptr *uint32, old, new uint32) bool
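
// For illustration, the usual retry loop around a compare-and-swap
// (a sketch; flag is a hypothetical *uint32):
//
//	for {
//		old := atomicload(flag)
//		if cas(flag, old, old|1) {
//			break
//		}
//	}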

// NO go:noescape annotation; see atomic_pointer.go.
func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool

func nop() // call to prevent inlining of function body

//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool

//go:noescape
func atomicstoreuintptr(ptr *uintptr, new uintptr)

//go:noescape
func atomicloaduintptr(ptr *uintptr) uintptr

//go:noescape
func atomicloaduint(ptr *uint) uint

// TODO: Write native implementations of int64 atomic ops (or improve
// inliner). These portable ones can't be inlined right now, so we're
// taking an extra function call hit.

func atomicstoreint64(ptr *int64, new int64) {
	atomicstore64((*uint64)(unsafe.Pointer(ptr)), uint64(new))
}

func atomicloadint64(ptr *int64) int64 {
	return int64(atomicload64((*uint64)(unsafe.Pointer(ptr))))
}

func xaddint64(ptr *int64, delta int64) int64 {
	return int64(xadd64((*uint64)(unsafe.Pointer(ptr)), delta))
}
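
// For illustration, these wrappers let an int64 counter be updated
// atomically without converting at every call site (bytesUsed is a
// hypothetical variable):
//
//	total := xaddint64(&bytesUsed, int64(n))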

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
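
// For illustration, the pattern this enables (a sketch; T and
// sharedPtr are hypothetical):
//
//	p := new(T)
//	p.field = 42           // initialization writes
//	publicationBarrier()   // order initialization before publication
//	sharedPtr = p          // publication write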

//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// For both, the argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc(unsafe.Pointer(&arg1))
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc(argp unsafe.Pointer) uintptr

//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr

//go:noescape
func asmcgocall(fn, arg unsafe.Pointer) int32

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

func morestack()
func rt0_go()

// stackBarrier records that the stack has been unwound past a certain
// point. It is installed over a return PC on the stack. It must
// retrieve the original return PC from g.stkbar, increment
// g.stkbarPos to record that the barrier was hit, and jump to the
// original return PC.
func stackBarrier()

// return0 is a stub used to return 0 from deferproc.
// It is called at the very end of deferproc to signal
// the calling Go function that it should not jump
// to deferreturn.
// in asm_*.s
func return0()

//go:linkname time_now time.now
func time_now() (sec int64, nsec int32)

// in asm_*.s
// not called directly; definitions here supply type information for traceback.
func call32(fn, arg unsafe.Pointer, n, retoffset uint32)
func call64(fn, arg unsafe.Pointer, n, retoffset uint32)
func call128(fn, arg unsafe.Pointer, n, retoffset uint32)
func call256(fn, arg unsafe.Pointer, n, retoffset uint32)
func call512(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1024(fn, arg unsafe.Pointer, n, retoffset uint32)
func call2048(fn, arg unsafe.Pointer, n, retoffset uint32)
func call4096(fn, arg unsafe.Pointer, n, retoffset uint32)
func call8192(fn, arg unsafe.Pointer, n, retoffset uint32)
func call16384(fn, arg unsafe.Pointer, n, retoffset uint32)
func call32768(fn, arg unsafe.Pointer, n, retoffset uint32)
func call65536(fn, arg unsafe.Pointer, n, retoffset uint32)
func call131072(fn, arg unsafe.Pointer, n, retoffset uint32)
func call262144(fn, arg unsafe.Pointer, n, retoffset uint32)
func call524288(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1048576(fn, arg unsafe.Pointer, n, retoffset uint32)
func call2097152(fn, arg unsafe.Pointer, n, retoffset uint32)
func call4194304(fn, arg unsafe.Pointer, n, retoffset uint32)
func call8388608(fn, arg unsafe.Pointer, n, retoffset uint32)
func call16777216(fn, arg unsafe.Pointer, n, retoffset uint32)
func call33554432(fn, arg unsafe.Pointer, n, retoffset uint32)
func call67108864(fn, arg unsafe.Pointer, n, retoffset uint32)
func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
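
// For example, the assembly reflectcall dispatches to the variant with
// the smallest frame that fits the arguments: argsize == 100 goes to
// call128 (per the dispatch logic in the asm_*.s implementations).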

func systemstack_switch()

func prefetcht0(addr uintptr)
func prefetcht1(addr uintptr)
func prefetcht2(addr uintptr)
func prefetchnta(addr uintptr)

func unixnanotime() int64 {
	sec, nsec := time_now()
	return sec*1e9 + int64(nsec)
}

// round n up to a multiple of a.  a must be a power of 2.
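// For example, round(13, 8) == (13+7)&^7 == 20&^7 == 16.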
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}