github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/sys_solaris_amd64.s

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// System calls and other sys.stuff for AMD64, SunOS
// /usr/include/sys/syscall.h for syscall numbers.
//

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"

// This is needed by asm_amd64.s
TEXT runtime·settls(SB),NOSPLIT,$8
	RET

// void libc_miniterrno(void *(*___errno)(void));
//
// Set the TLS errno pointer in M.
//
// Called using runtime·asmcgocall from os_solaris.c:/minit.
// NOT USING GO CALLING CONVENTION.
TEXT runtime·miniterrno(SB),NOSPLIT,$0
	// asmcgocall will put first argument into DI.
	CALL	DI	// SysV ABI so returns in AX
	get_tls(CX)
	MOVQ	g(CX), BX
	MOVQ	g_m(BX), BX
	MOVQ	AX, (m_mOS+mOS_perrno)(BX)
	RET

// pipe(3c) wrapper that returns fds in AX, DX.
// NOT USING GO CALLING CONVENTION.
TEXT runtime·pipe1(SB),NOSPLIT,$0
	SUBQ	$16, SP	// 8 bytes will do, but stack has to be 16-byte aligned
	MOVQ	SP, DI
	LEAQ	libc_pipe(SB), AX
	CALL	AX
	MOVL	0(SP), AX
	MOVL	4(SP), DX
	ADDQ	$16, SP
	RET

// Call a library function with SysV calling conventions.
// The called function can take a maximum of 6 INTEGER class arguments,
// see
//	Michael Matz, Jan Hubicka, Andreas Jaeger, and Mark Mitchell
//	System V Application Binary Interface
//	AMD64 Architecture Processor Supplement
// section 3.2.3.
//
// Called by runtime·asmcgocall or runtime·cgocall.
// NOT USING GO CALLING CONVENTION.
TEXT runtime·asmsysvicall6(SB),NOSPLIT,$0
	// asmcgocall will put first argument into DI.
	PUSHQ	DI	// save for later
	MOVQ	libcall_fn(DI), AX
	MOVQ	libcall_args(DI), R11
	MOVQ	libcall_n(DI), R10

	get_tls(CX)
	MOVQ	g(CX), BX
	CMPQ	BX, $0
	JEQ	skiperrno1
	MOVQ	g_m(BX), BX
	MOVQ	(m_mOS+mOS_perrno)(BX), DX
	CMPQ	DX, $0
	JEQ	skiperrno1
	MOVL	$0, 0(DX)

skiperrno1:
	CMPQ	R11, $0
	JEQ	skipargs
	// Load 6 args into corresponding registers.
	MOVQ	0(R11), DI
	MOVQ	8(R11), SI
	MOVQ	16(R11), DX
	MOVQ	24(R11), CX
	MOVQ	32(R11), R8
	MOVQ	40(R11), R9
skipargs:

	// Call SysV function
	CALL	AX

	// Return result
	POPQ	DI
	MOVQ	AX, libcall_r1(DI)
	MOVQ	DX, libcall_r2(DI)

	get_tls(CX)
	MOVQ	g(CX), BX
	CMPQ	BX, $0
	JEQ	skiperrno2
	MOVQ	g_m(BX), BX
	MOVQ	(m_mOS+mOS_perrno)(BX), AX
	CMPQ	AX, $0
	JEQ	skiperrno2
	MOVL	0(AX), AX
	MOVQ	AX, libcall_err(DI)

skiperrno2:
	RET

// uint32 tstart_sysvicall(M *newm);
TEXT runtime·tstart_sysvicall(SB),NOSPLIT,$0
	// DI contains first arg newm
	MOVQ	m_g0(DI), DX	// g

	// Make TLS entries point at g and m.
	get_tls(BX)
	MOVQ	DX, g(BX)
	MOVQ	DI, g_m(DX)

	// Layout new m scheduler stack on os stack.
	MOVQ	SP, AX
	MOVQ	AX, (g_stack+stack_hi)(DX)
	SUBQ	$(0x100000), AX	// stack size
	MOVQ	AX, (g_stack+stack_lo)(DX)
	ADDQ	$const__StackGuard, AX
	MOVQ	AX, g_stackguard0(DX)
	MOVQ	AX, g_stackguard1(DX)

	// Someday the convention will be D is always cleared.
	CLD

	CALL	runtime·stackcheck(SB)	// clobbers AX,CX
	CALL	runtime·mstart(SB)

	XORL	AX, AX	// return 0 == success
	MOVL	AX, ret+8(FP)
	RET
// Careful, this is called by __sighndlr, a libc function. We must preserve
// registers as per AMD 64 ABI.
TEXT runtime·sigtramp(SB),NOSPLIT,$0
	// Note that we are executing on altsigstack here, so we have
	// more stack available than NOSPLIT would have us believe.
	// To defeat the linker, we make our own stack frame with
	// more space:
	SUBQ	$184, SP

	// save registers
	MOVQ	BX, 32(SP)
	MOVQ	BP, 40(SP)
	MOVQ	R12, 48(SP)
	MOVQ	R13, 56(SP)
	MOVQ	R14, 64(SP)
	MOVQ	R15, 72(SP)

	get_tls(BX)
	// check that g exists
	MOVQ	g(BX), R10
	CMPQ	R10, $0
	JNE	allgood
	MOVQ	SI, 80(SP)
	MOVQ	DX, 88(SP)
	LEAQ	80(SP), AX
	MOVQ	DI, 0(SP)
	MOVQ	AX, 8(SP)
	MOVQ	$runtime·badsignal(SB), AX
	CALL	AX
	JMP	exit

allgood:
	// Save m->libcall and m->scratch. We need to do this because we
	// might get interrupted by a signal in runtime·asmcgocall.

	// save m->libcall
	MOVQ	g_m(R10), BP
	LEAQ	m_libcall(BP), R11
	MOVQ	libcall_fn(R11), R10
	MOVQ	R10, 88(SP)
	MOVQ	libcall_args(R11), R10
	MOVQ	R10, 96(SP)
	MOVQ	libcall_n(R11), R10
	MOVQ	R10, 104(SP)
	MOVQ	libcall_r1(R11), R10
	MOVQ	R10, 168(SP)
	MOVQ	libcall_r2(R11), R10
	MOVQ	R10, 176(SP)

	// save m->scratch
	LEAQ	(m_mOS+mOS_scratch)(BP), R11
	MOVQ	0(R11), R10
	MOVQ	R10, 112(SP)
	MOVQ	8(R11), R10
	MOVQ	R10, 120(SP)
	MOVQ	16(R11), R10
	MOVQ	R10, 128(SP)
	MOVQ	24(R11), R10
	MOVQ	R10, 136(SP)
	MOVQ	32(R11), R10
	MOVQ	R10, 144(SP)
	MOVQ	40(R11), R10
	MOVQ	R10, 152(SP)

	// save errno, it might be EINTR; stuff we do here might reset it.
	MOVQ	(m_mOS+mOS_perrno)(BP), R10
	MOVL	0(R10), R10
	MOVQ	R10, 160(SP)

	// prepare call
	MOVQ	DI, 0(SP)
	MOVQ	SI, 8(SP)
	MOVQ	DX, 16(SP)
	CALL	runtime·sigtrampgo(SB)

	get_tls(BX)
	MOVQ	g(BX), BP
	MOVQ	g_m(BP), BP
	// restore libcall
	LEAQ	m_libcall(BP), R11
	MOVQ	88(SP), R10
	MOVQ	R10, libcall_fn(R11)
	MOVQ	96(SP), R10
	MOVQ	R10, libcall_args(R11)
	MOVQ	104(SP), R10
	MOVQ	R10, libcall_n(R11)
	MOVQ	168(SP), R10
	MOVQ	R10, libcall_r1(R11)
	MOVQ	176(SP), R10
	MOVQ	R10, libcall_r2(R11)

	// restore scratch
	LEAQ	(m_mOS+mOS_scratch)(BP), R11
	MOVQ	112(SP), R10
	MOVQ	R10, 0(R11)
	MOVQ	120(SP), R10
	MOVQ	R10, 8(R11)
	MOVQ	128(SP), R10
	MOVQ	R10, 16(R11)
	MOVQ	136(SP), R10
	MOVQ	R10, 24(R11)
	MOVQ	144(SP), R10
	MOVQ	R10, 32(R11)
	MOVQ	152(SP), R10
	MOVQ	R10, 40(R11)

	// restore errno
	MOVQ	(m_mOS+mOS_perrno)(BP), R11
	MOVQ	160(SP), R10
	MOVL	R10, 0(R11)

exit:
	// restore registers
	MOVQ	32(SP), BX
	MOVQ	40(SP), BP
	MOVQ	48(SP), R12
	MOVQ	56(SP), R13
	MOVQ	64(SP), R14
	MOVQ	72(SP), R15

	ADDQ	$184, SP
	RET

TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
	MOVQ	fn+0(FP), AX
	MOVL	sig+8(FP), DI
	MOVQ	info+16(FP), SI
	MOVQ	ctx+24(FP), DX
	PUSHQ	BP
	MOVQ	SP, BP
	ANDQ	$~15, SP	// alignment for x86_64 ABI
	CALL	AX
	MOVQ	BP, SP
	POPQ	BP
	RET

// Called from runtime·usleep (Go). Can be called on Go stack, on OS stack,
// can also be called in cgo callback path without a g->m.
TEXT runtime·usleep1(SB),NOSPLIT,$0
	MOVL	usec+0(FP), DI
	MOVQ	$usleep2<>(SB), AX	// to hide from 6l

	// Execute call on m->g0.
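	// The checks below cover threads that lack TLS, a g, or an m (for
	// example a cgo callback before an m has been acquired); those fall
	// through to noswitch and call usleep2 on the current stack.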
	get_tls(R15)
	CMPQ	R15, $0
	JE	noswitch

	MOVQ	g(R15), R13
	CMPQ	R13, $0
	JE	noswitch
	MOVQ	g_m(R13), R13
	CMPQ	R13, $0
	JE	noswitch
	// TODO(aram): do something about the cpu profiler here.

	MOVQ	m_g0(R13), R14
	CMPQ	g(R15), R14
	JNE	switch
	// executing on m->g0 already
	CALL	AX
	RET

switch:
	// Switch to m->g0 stack and back.
	MOVQ	(g_sched+gobuf_sp)(R14), R14
	MOVQ	SP, -8(R14)
	LEAQ	-8(R14), SP
	CALL	AX
	MOVQ	0(SP), SP
	RET

noswitch:
	// Not a Go-managed thread. Do not switch stack.
	CALL	AX
	RET

// Runs on OS stack. duration (in µs units) is in DI.
TEXT usleep2<>(SB),NOSPLIT,$0
	LEAQ	libc_usleep(SB), AX
	CALL	AX
	RET

// Runs on OS stack, called from runtime·osyield.
TEXT runtime·osyield1(SB),NOSPLIT,$0
	LEAQ	libc_sched_yield(SB), AX
	CALL	AX
	RET