// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// add returns the pointer p advanced by x bytes.
// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to system stack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		// Already on a system stack (g0 or the signal stack):
		// run fn directly.
		fn()
	} else if gp == mp.curg {
		// On the ordinary goroutine stack: switch to g0 via
		// mcall, run fn there, then resume the original
		// goroutine with gogo. fn1 is passed through noescape
		// so escape analysis does not force the closure to the
		// heap; see the mcall comment above for why mcall
		// itself must not be go:noescape.
		fn1 := func(origg *g) {
			fn()
			gogo(origg)
		}
		mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
	} else {
		badsystemstack()
	}
}

var badsystemstackMsg = "fatal: systemstack called from unexpected goroutine"

// badsystemstack reports that systemstack was called from an
// unexpected goroutine by writing a message directly to standard
// error (fd 2). It is nosplit and must not contain write barriers
// because of the contexts in which it can be called.
//go:nosplit
//go:nowritebarrierrec
func badsystemstack() {
	sp := stringStructOf(&badsystemstackMsg)
	write(2, sp.str, int32(sp.len))
}

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// *ptr is initialized memory and its type is pointer-free, or
//
// *ptr is uninitialized memory (e.g., memory that's being reused
// for a new allocation) and hence contains only "junk".
//
// The (CPU-specific) implementations of this function are in memclr_*.s.
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
//go:noescape
//extern __builtin_memmove
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

// memcmp compares size bytes at a and b, with C memcmp semantics
// (negative, zero, or positive result).
//go:noescape
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)

// fastrand returns a pseudorandom uint32, using per-m state so that
// no locking is required.
//go:nosplit
func fastrand() uint32 {
	mp := getg().m
	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	s1, s0 := mp.fastrand[0], mp.fastrand[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	mp.fastrand[0], mp.fastrand[1] = s0, s1
	return s0 + s1
}

// fastrandn returns a pseudorandom uint32 in [0, n).
//go:nosplit
func fastrandn(n uint32) uint32 {
	// This is similar to fastrand() % n, but faster.
	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32(uint64(fastrand()) * uint64(n) >> 32)
}

//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }

// in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// The functions below have no Go body; they are implemented outside
// this file (in C or assembly) for gccgo.
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func setg(gg *g)

// breakpoint executes a trap instruction (__builtin_trap).
//extern __builtin_trap
func breakpoint()

// asminit is a no-op for gccgo.
func asminit() {}

// reflectcall calls fn with the given type and argument/result
// pointers; implemented outside this file.
//go:noescape
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)

// procyield is implemented outside this file (no Go body here).
func procyield(cycles uint32)

// neverCallThisFunction is the parameter type of goexit; it exists so
// that goexit cannot be called accidentally from ordinary code.
type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// The implementation may be a compiler intrinsic; there is not
// necessarily code implementing this on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp()
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc() uintptr

//go:noescape
func getcallersp() uintptr // implemented as an intrinsic on all platforms

// getsp returns the stack pointer (SP) of the caller of getsp.
// It must not be inlined, or getcallersp would report the wrong frame.
//go:noinline
func getsp() uintptr { return getcallersp() }

// asmcgocall is not supported by gccgo; calling it is a fatal error.
func asmcgocall(fn, arg unsafe.Pointer) int32 {
	throw("asmcgocall")
	return 0
}

// alignUp rounds n up to a multiple of a. a must be a power of 2.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// alignDown rounds n down to a multiple of a. a must be a power of 2.
func alignDown(n, a uintptr) uintptr {
	return n &^ (a - 1)
}

// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	return true
}

// For gccgo this is in the C code.
func osyield()

// syscall makes a raw system call with up to six arguments.
// It is implemented by the C function __go_syscall6.
//extern __go_syscall6
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo
func setIsCgo() {
	iscgo = true
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}

// Here for gccgo.
func errno() int

// For gccgo these are written in C.
func entersyscall()
func entersyscallblock()

// Get signal trampoline, written in C.
func getSigtramp() uintptr

// The sa_handler field is generally hidden in a union, so use C accessors.
//go:noescape
func getSigactionHandler(*_sigaction) uintptr

//go:noescape
func setSigactionHandler(*_sigaction, uintptr)

// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns and, if available, PC where signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)

// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)

// Implemented in C for gccgo.
func setRandomNumber(uint32)

// Called by gccgo's proc.c.
//go:linkname allocg
func allocg() *g {
	return new(g)
}

// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr

// uintptrMask is a mask with every bit of a uintptr set.
const uintptrMask = 1<<(8*sys.PtrSize) - 1

// bitvector describes a sequence of bits, packed 8 per byte.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// ptrbit returns the i'th bit in bv.
// ptrbit is less efficient than iterating directly over bitvector bits,
// and should only be used in non-performance-critical code.
// See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8 {
	b := *(addb(bv.bytedata, i/8))
	return (b >> (i % 8)) & 1
}

// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	if x {
		return 1
	}
	return 0
}

// abort crashes the runtime in situations where even throw might not
// work. In general it should do something a debugger will recognize
// (e.g., an INT3 on x86). A crash in abort is recognized by the
// signal handler, which will attempt to tear down the runtime
// immediately.
func abort()

// usestackmaps is true if stack map (precise stack scan) is enabled.
var usestackmaps bool

// probestackmaps detects whether there are stack maps.
func probestackmaps() bool

// For the math/bits packages for gccgo.
//go:linkname getDivideError
func getDivideError() error {
	return divideError
}

// For the math/bits packages for gccgo.
//go:linkname getOverflowError
func getOverflowError() error {
	return overflowError
}