github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/sys_linux_arm.s

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//
// System calls and other sys.stuff for arm, Linux
//

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"

#define CLOCK_REALTIME	0
#define CLOCK_MONOTONIC	1

// for EABI, as we don't support OABI
#define SYS_BASE 0x0

#define SYS_exit (SYS_BASE + 1)
#define SYS_read (SYS_BASE + 3)
#define SYS_write (SYS_BASE + 4)
#define SYS_open (SYS_BASE + 5)
#define SYS_close (SYS_BASE + 6)
#define SYS_getpid (SYS_BASE + 20)
#define SYS_kill (SYS_BASE + 37)
#define SYS_pipe (SYS_BASE + 42)
#define SYS_clone (SYS_BASE + 120)
#define SYS_rt_sigreturn (SYS_BASE + 173)
#define SYS_rt_sigaction (SYS_BASE + 174)
#define SYS_rt_sigprocmask (SYS_BASE + 175)
#define SYS_sigaltstack (SYS_BASE + 186)
#define SYS_mmap2 (SYS_BASE + 192)
#define SYS_futex (SYS_BASE + 240)
#define SYS_exit_group (SYS_BASE + 248)
#define SYS_munmap (SYS_BASE + 91)
#define SYS_madvise (SYS_BASE + 220)
#define SYS_setitimer (SYS_BASE + 104)
#define SYS_mincore (SYS_BASE + 219)
#define SYS_gettid (SYS_BASE + 224)
#define SYS_tgkill (SYS_BASE + 268)
#define SYS_sched_yield (SYS_BASE + 158)
#define SYS_nanosleep (SYS_BASE + 162)
#define SYS_sched_getaffinity (SYS_BASE + 242)
#define SYS_clock_gettime (SYS_BASE + 263)
#define SYS_epoll_create (SYS_BASE + 250)
#define SYS_epoll_ctl (SYS_BASE + 251)
#define SYS_epoll_wait (SYS_BASE + 252)
#define SYS_epoll_create1 (SYS_BASE + 357)
#define SYS_pipe2 (SYS_BASE + 359)
#define SYS_fcntl (SYS_BASE + 55)
#define SYS_access (SYS_BASE + 33)
#define SYS_connect (SYS_BASE + 283)
#define SYS_socket (SYS_BASE + 281)
#define SYS_brk (SYS_BASE + 45)

#define ARM_BASE (SYS_BASE + 0x0f0000)

TEXT runtime·open(SB),NOSPLIT,$0
	MOVW	name+0(FP), R0
	MOVW	mode+4(FP), R1
	MOVW	perm+8(FP), R2
	MOVW	$SYS_open, R7
	SWI	$0
	MOVW	$0xfffff001, R1
	CMP	R1, R0
	MOVW.HI	$-1, R0
	MOVW	R0, ret+12(FP)
	RET

TEXT runtime·closefd(SB),NOSPLIT,$0
	MOVW	fd+0(FP), R0
	MOVW	$SYS_close, R7
	SWI	$0
	MOVW	$0xfffff001, R1
	CMP	R1, R0
	MOVW.HI	$-1, R0
	MOVW	R0, ret+4(FP)
	RET

TEXT runtime·write1(SB),NOSPLIT,$0
	MOVW	fd+0(FP), R0
	MOVW	p+4(FP), R1
	MOVW	n+8(FP), R2
	MOVW	$SYS_write, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

TEXT runtime·read(SB),NOSPLIT,$0
	MOVW	fd+0(FP), R0
	MOVW	p+4(FP), R1
	MOVW	n+8(FP), R2
	MOVW	$SYS_read, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

// func pipe() (r, w int32, errno int32)
TEXT runtime·pipe(SB),NOSPLIT,$0-12
	MOVW	$r+0(FP), R0
	MOVW	$SYS_pipe, R7
	SWI	$0
	MOVW	R0, errno+8(FP)
	RET

// func pipe2(flags int32) (r, w int32, errno int32)
TEXT runtime·pipe2(SB),NOSPLIT,$0-16
	MOVW	$r+4(FP), R0
	MOVW	flags+0(FP), R1
	MOVW	$SYS_pipe2, R7
	SWI	$0
	MOVW	R0, errno+12(FP)
	RET

TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
	MOVW	code+0(FP), R0
	MOVW	$SYS_exit_group, R7
	SWI	$0
	MOVW	$1234, R0
	MOVW	$1002, R1
	MOVW	R0, (R1)	// fail hard

TEXT exit1<>(SB),NOSPLIT|NOFRAME,$0
	MOVW	code+0(FP), R0
	MOVW	$SYS_exit, R7
	SWI	$0
	MOVW	$1234, R0
	MOVW	$1003, R1
	MOVW	R0, (R1)	// fail hard
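
// On error, Linux system calls return a negated errno value in the range
// [-4095, -1]. The wrappers that check for failure (open, closefd above,
// and mmap/munmap/sigaltstack below) do so with an unsigned compare against
// $0xfffff001 (that is, -4095): any result that is higher when treated as
// unsigned is taken to be an error. Roughly, in C terms:
//
//	if ((uint32_t)r > 0xfffff001u)	// r holds -errno
//		r = -1;			// or record -r as the errno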

// func exitThread(wait *uint32)
TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-4
	MOVW	wait+0(FP), R0
	// We're done using the stack.
	// Alas, there's no reliable way to make this write atomic
	// without potentially using the stack. So it goes.
	MOVW	$0, R1
	MOVW	R1, (R0)
	MOVW	$0, R0	// exit code
	MOVW	$SYS_exit, R7
	SWI	$0
	MOVW	$1234, R0
	MOVW	$1004, R1
	MOVW	R0, (R1)	// fail hard
	JMP	0(PC)

TEXT runtime·gettid(SB),NOSPLIT,$0-4
	MOVW	$SYS_gettid, R7
	SWI	$0
	MOVW	R0, ret+0(FP)
	RET

TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
	MOVW	$SYS_getpid, R7
	SWI	$0
	MOVW	R0, R4
	MOVW	$SYS_gettid, R7
	SWI	$0
	MOVW	R0, R1	// arg 2 tid
	MOVW	R4, R0	// arg 1 pid
	MOVW	sig+0(FP), R2	// arg 3
	MOVW	$SYS_tgkill, R7
	SWI	$0
	RET

TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
	MOVW	$SYS_getpid, R7
	SWI	$0
	// arg 1 tid already in R0 from getpid
	MOVW	sig+0(FP), R1	// arg 2 - signal
	MOVW	$SYS_kill, R7
	SWI	$0
	RET

TEXT ·getpid(SB),NOSPLIT,$0-4
	MOVW	$SYS_getpid, R7
	SWI	$0
	MOVW	R0, ret+0(FP)
	RET

TEXT ·tgkill(SB),NOSPLIT,$0-12
	MOVW	tgid+0(FP), R0
	MOVW	tid+4(FP), R1
	MOVW	sig+8(FP), R2
	MOVW	$SYS_tgkill, R7
	SWI	$0
	RET

TEXT runtime·mmap(SB),NOSPLIT,$0
	MOVW	addr+0(FP), R0
	MOVW	n+4(FP), R1
	MOVW	prot+8(FP), R2
	MOVW	flags+12(FP), R3
	MOVW	fd+16(FP), R4
	MOVW	off+20(FP), R5
	MOVW	$SYS_mmap2, R7
	SWI	$0
	MOVW	$0xfffff001, R6
	CMP	R6, R0
	MOVW	$0, R1
	RSB.HI	$0, R0
	MOVW.HI	R0, R1	// if error, put in R1
	MOVW.HI	$0, R0
	MOVW	R0, p+24(FP)
	MOVW	R1, err+28(FP)
	RET

TEXT runtime·munmap(SB),NOSPLIT,$0
	MOVW	addr+0(FP), R0
	MOVW	n+4(FP), R1
	MOVW	$SYS_munmap, R7
	SWI	$0
	MOVW	$0xfffff001, R6
	CMP	R6, R0
	MOVW.HI	$0, R8	// crash on syscall failure
	MOVW.HI	R8, (R8)
	RET

TEXT runtime·madvise(SB),NOSPLIT,$0
	MOVW	addr+0(FP), R0
	MOVW	n+4(FP), R1
	MOVW	flags+8(FP), R2
	MOVW	$SYS_madvise, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

TEXT runtime·setitimer(SB),NOSPLIT,$0
	MOVW	mode+0(FP), R0
	MOVW	new+4(FP), R1
	MOVW	old+8(FP), R2
	MOVW	$SYS_setitimer, R7
	SWI	$0
	RET

TEXT runtime·mincore(SB),NOSPLIT,$0
	MOVW	addr+0(FP), R0
	MOVW	n+4(FP), R1
	MOVW	dst+8(FP), R2
	MOVW	$SYS_mincore, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET
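
// func walltime1() (sec int64, nsec int32)
// walltime1 and nanotime1 (below) share the same structure: switch to the
// g0 stack, call the vDSO clock_gettime if it is available, and otherwise
// fall back to the clock_gettime system call.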

TEXT runtime·walltime1(SB),NOSPLIT,$8-12
	// We don't know how much stack space the VDSO code will need,
	// so switch to g0.

	// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
	MOVW	R13, R4	// R4 is unchanged by C code.

	MOVW	g_m(g), R5	// R5 is unchanged by C code.

	// Set vdsoPC and vdsoSP for SIGPROF traceback.
	// Save the old values on stack and restore them on exit,
	// so this function is reentrant.
	MOVW	m_vdsoPC(R5), R1
	MOVW	m_vdsoSP(R5), R2
	MOVW	R1, 4(R13)
	MOVW	R2, 8(R13)

	MOVW	LR, m_vdsoPC(R5)
	MOVW	R13, m_vdsoSP(R5)

	MOVW	m_curg(R5), R0

	CMP	g, R0	// Only switch if on curg.
	B.NE	noswitch

	MOVW	m_g0(R5), R0
	MOVW	(g_sched+gobuf_sp)(R0), R13	// Set SP to g0 stack

noswitch:
	SUB	$24, R13	// Space for results
	BIC	$0x7, R13	// Align for C code

	MOVW	$CLOCK_REALTIME, R0
	MOVW	$8(R13), R1	// timespec
	MOVW	runtime·vdsoClockgettimeSym(SB), R2
	CMP	$0, R2
	B.EQ	fallback

	// Store g on gsignal's stack, so if we receive a signal
	// during VDSO code we can find the g.
	// If we don't have a signal stack, we won't receive signal,
	// so don't bother saving g.
	// When using cgo, we already saved g on TLS, also don't save
	// g here.
	// Also don't save g if we are already on the signal stack.
	// We won't get a nested signal.
	MOVB	runtime·iscgo(SB), R6
	CMP	$0, R6
	BNE	nosaveg
	MOVW	m_gsignal(R5), R6	// g.m.gsignal
	CMP	$0, R6
	BEQ	nosaveg
	CMP	g, R6
	BEQ	nosaveg
	MOVW	(g_stack+stack_lo)(R6), R6	// g.m.gsignal.stack.lo
	MOVW	g, (R6)

	BL	(R2)

	MOVW	$0, R1
	MOVW	R1, (R6)	// clear g slot, R6 is unchanged by C code

	JMP	finish

nosaveg:
	BL	(R2)
	JMP	finish

fallback:
	MOVW	$SYS_clock_gettime, R7
	SWI	$0

finish:
	MOVW	8(R13), R0	// sec
	MOVW	12(R13), R2	// nsec

	MOVW	R4, R13	// Restore real SP
	// Restore vdsoPC, vdsoSP
	// We don't worry about being signaled between the two stores.
	// If we are not in a signal handler, we'll restore vdsoSP to 0,
	// and no one will care about vdsoPC. If we are in a signal handler,
	// we cannot receive another signal.
	MOVW	8(R13), R1
	MOVW	R1, m_vdsoSP(R5)
	MOVW	4(R13), R1
	MOVW	R1, m_vdsoPC(R5)

	MOVW	R0, sec_lo+0(FP)
	MOVW	R1, sec_hi+4(FP)
	MOVW	R2, nsec+8(FP)
	RET
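
// In both walltime1 (above) and nanotime1 (below), the timespec filled in
// by clock_gettime lives at 8(R13). On linux/arm tv_sec and tv_nsec are
// 32-bit words, so sec is read from 8(R13) and nsec from 12(R13).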

// int64 nanotime1(void)
TEXT runtime·nanotime1(SB),NOSPLIT,$8-8
	// Switch to g0 stack. See comment above in runtime·walltime.

	// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
	MOVW	R13, R4	// R4 is unchanged by C code.

	MOVW	g_m(g), R5	// R5 is unchanged by C code.

	// Set vdsoPC and vdsoSP for SIGPROF traceback.
	// Save the old values on stack and restore them on exit,
	// so this function is reentrant.
	MOVW	m_vdsoPC(R5), R1
	MOVW	m_vdsoSP(R5), R2
	MOVW	R1, 4(R13)
	MOVW	R2, 8(R13)

	MOVW	LR, m_vdsoPC(R5)
	MOVW	R13, m_vdsoSP(R5)

	MOVW	m_curg(R5), R0

	CMP	g, R0	// Only switch if on curg.
	B.NE	noswitch

	MOVW	m_g0(R5), R0
	MOVW	(g_sched+gobuf_sp)(R0), R13	// Set SP to g0 stack

noswitch:
	SUB	$24, R13	// Space for results
	BIC	$0x7, R13	// Align for C code

	MOVW	$CLOCK_MONOTONIC, R0
	MOVW	$8(R13), R1	// timespec
	MOVW	runtime·vdsoClockgettimeSym(SB), R2
	CMP	$0, R2
	B.EQ	fallback

	// Store g on gsignal's stack, so if we receive a signal
	// during VDSO code we can find the g.
	// If we don't have a signal stack, we won't receive signal,
	// so don't bother saving g.
	// When using cgo, we already saved g on TLS, also don't save
	// g here.
	// Also don't save g if we are already on the signal stack.
	// We won't get a nested signal.
	MOVB	runtime·iscgo(SB), R6
	CMP	$0, R6
	BNE	nosaveg
	MOVW	m_gsignal(R5), R6	// g.m.gsignal
	CMP	$0, R6
	BEQ	nosaveg
	CMP	g, R6
	BEQ	nosaveg
	MOVW	(g_stack+stack_lo)(R6), R6	// g.m.gsignal.stack.lo
	MOVW	g, (R6)

	BL	(R2)

	MOVW	$0, R1
	MOVW	R1, (R6)	// clear g slot, R6 is unchanged by C code

	JMP	finish

nosaveg:
	BL	(R2)
	JMP	finish

fallback:
	MOVW	$SYS_clock_gettime, R7
	SWI	$0

finish:
	MOVW	8(R13), R0	// sec
	MOVW	12(R13), R2	// nsec

	MOVW	R4, R13	// Restore real SP
	// Restore vdsoPC, vdsoSP
	// We don't worry about being signaled between the two stores.
	// If we are not in a signal handler, we'll restore vdsoSP to 0,
	// and no one will care about vdsoPC. If we are in a signal handler,
	// we cannot receive another signal.
	MOVW	8(R13), R4
	MOVW	R4, m_vdsoSP(R5)
	MOVW	4(R13), R4
	MOVW	R4, m_vdsoPC(R5)

	MOVW	$1000000000, R3
	MULLU	R0, R3, (R1, R0)
	ADD.S	R2, R0
	ADC	R4, R1

	MOVW	R0, ret_lo+0(FP)
	MOVW	R1, ret_hi+4(FP)
	RET

// int32 futex(int32 *uaddr, int32 op, int32 val,
//	struct timespec *timeout, int32 *uaddr2, int32 val2);
TEXT runtime·futex(SB),NOSPLIT,$0
	MOVW	addr+0(FP), R0
	MOVW	op+4(FP), R1
	MOVW	val+8(FP), R2
	MOVW	ts+12(FP), R3
	MOVW	addr2+16(FP), R4
	MOVW	val3+20(FP), R5
	MOVW	$SYS_futex, R7
	SWI	$0
	MOVW	R0, ret+24(FP)
	RET

// int32 clone(int32 flags, void *stack, M *mp, G *gp, void (*fn)(void));
TEXT runtime·clone(SB),NOSPLIT,$0
	MOVW	flags+0(FP), R0
	MOVW	stk+4(FP), R1
	MOVW	$0, R2	// parent tid ptr
	MOVW	$0, R3	// tls_val
	MOVW	$0, R4	// child tid ptr
	MOVW	$0, R5

	// Copy mp, gp, fn off parent stack for use by child.
	MOVW	$-16(R1), R1
	MOVW	mp+8(FP), R6
	MOVW	R6, 0(R1)
	MOVW	gp+12(FP), R6
	MOVW	R6, 4(R1)
	MOVW	fn+16(FP), R6
	MOVW	R6, 8(R1)
	MOVW	$1234, R6
	MOVW	R6, 12(R1)

	MOVW	$SYS_clone, R7
	SWI	$0

	// In parent, return.
	CMP	$0, R0
	BEQ	3(PC)
	MOVW	R0, ret+20(FP)
	RET

	// Paranoia: check that SP is as we expect. Use R13 to avoid linker 'fixup'
	NOP	R13	// tell vet SP/R13 changed - stop checking offsets
	MOVW	12(R13), R0
	MOVW	$1234, R1
	CMP	R0, R1
	BEQ	2(PC)
	BL	runtime·abort(SB)

	MOVW	0(R13), R8	// m
	MOVW	4(R13), R0	// g

	CMP	$0, R8
	BEQ	nog
	CMP	$0, R0
	BEQ	nog

	MOVW	R0, g
	MOVW	R8, g_m(g)

	// paranoia; check they are not nil
	MOVW	0(R8), R0
	MOVW	0(g), R0

	BL	runtime·emptyfunc(SB)	// fault if stack check is wrong

	// Initialize m->procid to Linux tid
	MOVW	$SYS_gettid, R7
	SWI	$0
	MOVW	g_m(g), R8
	MOVW	R0, m_procid(R8)

nog:
	// Call fn
	MOVW	8(R13), R0
	MOVW	$16(R13), R13
	BL	(R0)

	// It shouldn't return. If it does, exit that thread.
	SUB	$16, R13	// restore the stack pointer to avoid memory corruption
	MOVW	$0, R0
	MOVW	R0, 4(R13)
	BL	exit1<>(SB)

	MOVW	$1234, R0
	MOVW	$1005, R1
	MOVW	R0, (R1)
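
// Layout of the child's initial stack as set up by runtime·clone above
// (stk is the stack pointer passed in, so the child starts at stk-16):
//	0(R13)	mp
//	4(R13)	gp
//	8(R13)	fn
//	12(R13)	1234 (sentinel, re-checked in the child as a sanity test)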

TEXT runtime·sigaltstack(SB),NOSPLIT,$0
	MOVW	new+0(FP), R0
	MOVW	old+4(FP), R1
	MOVW	$SYS_sigaltstack, R7
	SWI	$0
	MOVW	$0xfffff001, R6
	CMP	R6, R0
	MOVW.HI	$0, R8	// crash on syscall failure
	MOVW.HI	R8, (R8)
	RET

TEXT runtime·sigfwd(SB),NOSPLIT,$0-16
	MOVW	sig+4(FP), R0
	MOVW	info+8(FP), R1
	MOVW	ctx+12(FP), R2
	MOVW	fn+0(FP), R11
	MOVW	R13, R4
	SUB	$24, R13
	BIC	$0x7, R13	// alignment for ELF ABI
	BL	(R11)
	MOVW	R4, R13
	RET

TEXT runtime·sigtramp(SB),NOSPLIT,$0
	// Reserve space for callee-save registers and arguments.
	MOVM.DB.W [R4-R11], (R13)
	SUB	$16, R13

	// this might be called in external code context,
	// where g is not set.
	// first save R0, because runtime·load_g will clobber it
	MOVW	R0, 4(R13)
	MOVB	runtime·iscgo(SB), R0
	CMP	$0, R0
	BL.NE	runtime·load_g(SB)

	MOVW	R1, 8(R13)
	MOVW	R2, 12(R13)
	MOVW	$runtime·sigtrampgo(SB), R11
	BL	(R11)

	// Restore callee-save registers.
	ADD	$16, R13
	MOVM.IA.W (R13), [R4-R11]

	RET

TEXT runtime·cgoSigtramp(SB),NOSPLIT,$0
	MOVW	$runtime·sigtramp(SB), R11
	B	(R11)

TEXT runtime·rtsigprocmask(SB),NOSPLIT,$0
	MOVW	how+0(FP), R0
	MOVW	new+4(FP), R1
	MOVW	old+8(FP), R2
	MOVW	size+12(FP), R3
	MOVW	$SYS_rt_sigprocmask, R7
	SWI	$0
	RET

TEXT runtime·rt_sigaction(SB),NOSPLIT,$0
	MOVW	sig+0(FP), R0
	MOVW	new+4(FP), R1
	MOVW	old+8(FP), R2
	MOVW	size+12(FP), R3
	MOVW	$SYS_rt_sigaction, R7
	SWI	$0
	MOVW	R0, ret+16(FP)
	RET

TEXT runtime·usleep(SB),NOSPLIT,$12
	MOVW	usec+0(FP), R0
	CALL	runtime·usplitR0(SB)
	MOVW	R0, 4(R13)
	MOVW	$1000, R0	// usec to nsec
	MUL	R0, R1
	MOVW	R1, 8(R13)
	MOVW	$4(R13), R0
	MOVW	$0, R1
	MOVW	$SYS_nanosleep, R7
	SWI	$0
	RET
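
// usleep above relies on runtime·usplitR0 (see asm_arm.s) to split the
// microsecond count into whole seconds (returned in R0) and the remaining
// microseconds (R1); the remainder is scaled by 1000 to fill in tv_nsec of
// the timespec at 4(R13) before calling nanosleep.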

// As for cas, memory barriers are complicated on ARM, but the kernel
// provides a user helper. ARMv5 does not support SMP and has no
// memory barrier instruction at all. ARMv6 added SMP support and has
// a memory barrier, but it requires writing to a coprocessor
// register. ARMv7 introduced the DMB instruction, but it's expensive
// even on single-core devices. The kernel helper takes care of all of
// this for us.

TEXT kernelPublicationBarrier<>(SB),NOSPLIT,$0
	// void __kuser_memory_barrier(void);
	MOVW	$0xffff0fa0, R11
	CALL	(R11)
	RET

TEXT ·publicationBarrier(SB),NOSPLIT,$0
	MOVB	·goarm(SB), R11
	CMP	$7, R11
	BLT	2(PC)
	JMP	·armPublicationBarrier(SB)
	JMP	kernelPublicationBarrier<>(SB)	// extra layer so this function is leaf and no SP adjustment on GOARM=7

TEXT runtime·osyield(SB),NOSPLIT,$0
	MOVW	$SYS_sched_yield, R7
	SWI	$0
	RET

TEXT runtime·sched_getaffinity(SB),NOSPLIT,$0
	MOVW	pid+0(FP), R0
	MOVW	len+4(FP), R1
	MOVW	buf+8(FP), R2
	MOVW	$SYS_sched_getaffinity, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

// int32 runtime·epollcreate(int32 size)
TEXT runtime·epollcreate(SB),NOSPLIT,$0
	MOVW	size+0(FP), R0
	MOVW	$SYS_epoll_create, R7
	SWI	$0
	MOVW	R0, ret+4(FP)
	RET

// int32 runtime·epollcreate1(int32 flags)
TEXT runtime·epollcreate1(SB),NOSPLIT,$0
	MOVW	flags+0(FP), R0
	MOVW	$SYS_epoll_create1, R7
	SWI	$0
	MOVW	R0, ret+4(FP)
	RET

// func epollctl(epfd, op, fd int32, ev *epollEvent) int
TEXT runtime·epollctl(SB),NOSPLIT,$0
	MOVW	epfd+0(FP), R0
	MOVW	op+4(FP), R1
	MOVW	fd+8(FP), R2
	MOVW	ev+12(FP), R3
	MOVW	$SYS_epoll_ctl, R7
	SWI	$0
	MOVW	R0, ret+16(FP)
	RET

// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout)
TEXT runtime·epollwait(SB),NOSPLIT,$0
	MOVW	epfd+0(FP), R0
	MOVW	ev+4(FP), R1
	MOVW	nev+8(FP), R2
	MOVW	timeout+12(FP), R3
	MOVW	$SYS_epoll_wait, R7
	SWI	$0
	MOVW	R0, ret+16(FP)
	RET

// void runtime·closeonexec(int32 fd)
TEXT runtime·closeonexec(SB),NOSPLIT,$0
	MOVW	fd+0(FP), R0	// fd
	MOVW	$2, R1	// F_SETFD
	MOVW	$1, R2	// FD_CLOEXEC
	MOVW	$SYS_fcntl, R7
	SWI	$0
	RET

// func runtime·setNonblock(fd int32)
TEXT runtime·setNonblock(SB),NOSPLIT,$0-4
	MOVW	fd+0(FP), R0	// fd
	MOVW	$3, R1	// F_GETFL
	MOVW	$0, R2
	MOVW	$SYS_fcntl, R7
	SWI	$0
	ORR	$0x800, R0, R2	// O_NONBLOCK
	MOVW	fd+0(FP), R0	// fd
	MOVW	$4, R1	// F_SETFL
	MOVW	$SYS_fcntl, R7
	SWI	$0
	RET

// b __kuser_get_tls @ 0xffff0fe0
TEXT runtime·read_tls_fallback(SB),NOSPLIT|NOFRAME,$0
	MOVW	$0xffff0fe0, R0
	B	(R0)

TEXT runtime·access(SB),NOSPLIT,$0
	MOVW	name+0(FP), R0
	MOVW	mode+4(FP), R1
	MOVW	$SYS_access, R7
	SWI	$0
	MOVW	R0, ret+8(FP)
	RET

TEXT runtime·connect(SB),NOSPLIT,$0
	MOVW	fd+0(FP), R0
	MOVW	addr+4(FP), R1
	MOVW	len+8(FP), R2
	MOVW	$SYS_connect, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

TEXT runtime·socket(SB),NOSPLIT,$0
	MOVW	domain+0(FP), R0
	MOVW	typ+4(FP), R1
	MOVW	prot+8(FP), R2
	MOVW	$SYS_socket, R7
	SWI	$0
	MOVW	R0, ret+12(FP)
	RET

// func sbrk0() uintptr
TEXT runtime·sbrk0(SB),NOSPLIT,$0-4
	// Implemented as brk(NULL).
	MOVW	$0, R0
	MOVW	$SYS_brk, R7
	SWI	$0
	MOVW	R0, ret+0(FP)
	RET

TEXT runtime·sigreturn(SB),NOSPLIT,$0-0
	RET