// github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/asm_arm64.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "go_tls.h"
#include "tls_arm64.h"
#include "funcdata.h"
#include "textflag.h"

TEXT runtime·rt0_go(SB),NOSPLIT,$0
	// SP = stack; R0 = argc; R1 = argv

	SUB	$32, RSP
	MOVW	R0, 8(RSP)	// argc
	MOVD	R1, 16(RSP)	// argv

	// create istack out of the given (operating system) stack.
	// _cgo_init may update stackguard.
	MOVD	$runtime·g0(SB), g
	MOVD	RSP, R7
	MOVD	$(-64*1024)(R7), R0
	MOVD	R0, g_stackguard0(g)
	MOVD	R0, g_stackguard1(g)
	MOVD	R0, (g_stack+stack_lo)(g)
	MOVD	R7, (g_stack+stack_hi)(g)

	// if there is a _cgo_init, call it using the gcc ABI.
	MOVD	_cgo_init(SB), R12
	CMP	$0, R12
	BEQ	nocgo

	MRS_TPIDR_R0			// load TLS base pointer
	MOVD	R0, R3			// arg 3: TLS base pointer
#ifdef TLSG_IS_VARIABLE
	MOVD	$runtime·tls_g(SB), R2	// arg 2: &tls_g
#else
	MOVD	$0, R2			// arg 2: not used when using platform's TLS
#endif
	MOVD	$setg_gcc<>(SB), R1	// arg 1: setg
	MOVD	g, R0			// arg 0: G
	SUB	$16, RSP		// reserve 16 bytes for sp-8 where fp may be saved.
	BL	(R12)
	ADD	$16, RSP

nocgo:
	BL	runtime·save_g(SB)
	// update stackguard after _cgo_init
	MOVD	(g_stack+stack_lo)(g), R0
	ADD	$const__StackGuard, R0
	MOVD	R0, g_stackguard0(g)
	MOVD	R0, g_stackguard1(g)

	// set the per-goroutine and per-mach "registers"
	MOVD	$runtime·m0(SB), R0

	// save m->g0 = g0
	MOVD	g, m_g0(R0)
	// save m0 to g0->m
	MOVD	R0, g_m(g)

	BL	runtime·check(SB)

	MOVW	8(RSP), R0	// copy argc
	MOVW	R0, -8(RSP)
	MOVD	16(RSP), R0	// copy argv
	MOVD	R0, 0(RSP)
	BL	runtime·args(SB)
	BL	runtime·osinit(SB)
	BL	runtime·schedinit(SB)

	// create a new goroutine to start program
	MOVD	$runtime·mainPC(SB), R0		// entry
	MOVD	RSP, R7
	MOVD.W	$0, -8(R7)
	MOVD.W	R0, -8(R7)
	MOVD.W	$0, -8(R7)
	MOVD.W	$0, -8(R7)
	MOVD	R7, RSP
	BL	runtime·newproc(SB)
	ADD	$32, RSP

	// start this M
	BL	runtime·mstart(SB)

	MOVD	$0, R0
	MOVD	R0, (R0)	// boom
	UNDEF

DATA	runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL	runtime·mainPC(SB),RODATA,$8

TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
	BRK
	RET

TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
	RET

/*
 * go-routine
 */

// void gosave(Gobuf*)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
	MOVD	buf+0(FP), R3
	MOVD	RSP, R0
	MOVD	R0, gobuf_sp(R3)
	MOVD	R29, gobuf_bp(R3)
	MOVD	LR, gobuf_pc(R3)
	MOVD	g, gobuf_g(R3)
	MOVD	ZR, gobuf_lr(R3)
	MOVD	ZR, gobuf_ret(R3)
	// Assert ctxt is zero. See func save.
	MOVD	gobuf_ctxt(R3), R0
	CMP	$0, R0
	BEQ	2(PC)
	CALL	runtime·badctxt(SB)
	RET

// void gogo(Gobuf*)
// restore state from Gobuf; longjmp
TEXT runtime·gogo(SB), NOSPLIT, $24-8
	MOVD	buf+0(FP), R5
	MOVD	gobuf_g(R5), g
	BL	runtime·save_g(SB)

	MOVD	0(g), R4	// make sure g is not nil
	MOVD	gobuf_sp(R5), R0
	MOVD	R0, RSP
	MOVD	gobuf_bp(R5), R29
	MOVD	gobuf_lr(R5), LR
	MOVD	gobuf_ret(R5), R0
	MOVD	gobuf_ctxt(R5), R26
	MOVD	$0, gobuf_sp(R5)
	MOVD	$0, gobuf_bp(R5)
	MOVD	$0, gobuf_ret(R5)
	MOVD	$0, gobuf_lr(R5)
	MOVD	$0, gobuf_ctxt(R5)
	CMP	ZR, ZR	// set condition codes for == test, needed by stack split
	MOVD	gobuf_pc(R5), R6
	B	(R6)

// void mcall(fn func(*g))
// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.
TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
	// Save caller state in g->sched
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(g)
	MOVD	R29, (g_sched+gobuf_bp)(g)
	MOVD	LR, (g_sched+gobuf_pc)(g)
	MOVD	$0, (g_sched+gobuf_lr)(g)
	MOVD	g, (g_sched+gobuf_g)(g)

	// Switch to m->g0 & its stack, call fn.
	MOVD	g, R3
	MOVD	g_m(g), R8
	MOVD	m_g0(R8), g
	BL	runtime·save_g(SB)
	CMP	g, R3
	BNE	2(PC)
	B	runtime·badmcall(SB)
	MOVD	fn+0(FP), R26	// context
	MOVD	0(R26), R4	// code pointer
	MOVD	(g_sched+gobuf_sp)(g), R0
	MOVD	R0, RSP	// sp = m->g0->sched.sp
	MOVD	(g_sched+gobuf_bp)(g), R29
	MOVD	R3, -8(RSP)
	MOVD	$0, -16(RSP)
	SUB	$16, RSP
	BL	(R4)
	B	runtime·badmcall2(SB)

// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the system stack because the one at the top of
// the system stack terminates the stack walk (see topofstack()).
TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
	UNDEF
	BL	(LR)	// make sure this function is not leaf
	RET

// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-8
	MOVD	fn+0(FP), R3	// R3 = fn
	MOVD	R3, R26		// context
	MOVD	g_m(g), R4	// R4 = m

	MOVD	m_gsignal(R4), R5	// R5 = gsignal
	CMP	g, R5
	BEQ	noswitch

	MOVD	m_g0(R4), R5	// R5 = g0
	CMP	g, R5
	BEQ	noswitch

	MOVD	m_curg(R4), R6
	CMP	g, R6
	BEQ	switch

	// Bad: g is not gsignal, not g0, not curg. What is it?
	// Hide call from linker nosplit analysis.
	MOVD	$runtime·badsystemstack(SB), R3
	BL	(R3)
	B	runtime·abort(SB)

switch:
	// save our state in g->sched. Pretend to
	// be systemstack_switch if the G stack is scanned.
	MOVD	$runtime·systemstack_switch(SB), R6
	ADD	$8, R6	// get past prologue
	MOVD	R6, (g_sched+gobuf_pc)(g)
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(g)
	MOVD	R29, (g_sched+gobuf_bp)(g)
	MOVD	$0, (g_sched+gobuf_lr)(g)
	MOVD	g, (g_sched+gobuf_g)(g)

	// switch to g0
	MOVD	R5, g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R3
	// make it look like mstart called systemstack on g0, to stop traceback
	SUB	$16, R3
	AND	$~15, R3
	MOVD	$runtime·mstart(SB), R4
	MOVD	R4, 0(R3)
	MOVD	R3, RSP
	MOVD	(g_sched+gobuf_bp)(g), R29

	// call target function
	MOVD	0(R26), R3	// code pointer
	BL	(R3)

	// switch back to g
	MOVD	g_m(g), R3
	MOVD	m_curg(R3), g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R0
	MOVD	R0, RSP
	MOVD	(g_sched+gobuf_bp)(g), R29
	MOVD	$0, (g_sched+gobuf_sp)(g)
	MOVD	$0, (g_sched+gobuf_bp)(g)
	RET

noswitch:
	// already on m stack, just call directly
	// Using a tail call here cleans up tracebacks since we won't stop
	// at an intermediate systemstack.
	MOVD	0(R26), R3	// code pointer
	MOVD.P	16(RSP), R30	// restore LR
	SUB	$8, RSP, R29	// restore FP
	B	(R3)

/*
 * support for morestack
 */

// Called during function prolog when more stack is needed.
// Caller has already loaded:
// R3 prolog's LR (R30)
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.
TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
	// Cannot grow scheduler stack (m->g0).
	MOVD	g_m(g), R8
	MOVD	m_g0(R8), R4
	CMP	g, R4
	BNE	3(PC)
	BL	runtime·badmorestackg0(SB)
	B	runtime·abort(SB)

	// Cannot grow signal stack (m->gsignal).
	MOVD	m_gsignal(R8), R4
	CMP	g, R4
	BNE	3(PC)
	BL	runtime·badmorestackgsignal(SB)
	B	runtime·abort(SB)

	// Called from f.
	// Set g->sched to context in f
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(g)
	MOVD	R29, (g_sched+gobuf_bp)(g)
	MOVD	LR, (g_sched+gobuf_pc)(g)
	MOVD	R3, (g_sched+gobuf_lr)(g)
	MOVD	R26, (g_sched+gobuf_ctxt)(g)

	// Called from f.
	// Set m->morebuf to f's callers.
	MOVD	R3, (m_morebuf+gobuf_pc)(R8)	// f's caller's PC
	MOVD	RSP, R0
	MOVD	R0, (m_morebuf+gobuf_sp)(R8)	// f's caller's RSP
	MOVD	g, (m_morebuf+gobuf_g)(R8)

	// Call newstack on m->g0's stack.
	MOVD	m_g0(R8), g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R0
	MOVD	R0, RSP
	MOVD	(g_sched+gobuf_bp)(g), R29
	MOVD.W	$0, -16(RSP)	// create a call frame on g0 (saved LR; keep 16-aligned)
	BL	runtime·newstack(SB)

	// Not reached, but make sure the return PC from the call to newstack
	// is still in this function, and not the beginning of the next.
	UNDEF

TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
	MOVW	$0, R26
	B	runtime·morestack(SB)

// reflectcall: call a function with the given argument list
// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!
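//
// Roughly, the DISPATCH chain below picks the first constant-frame call<N>
// whose N is >= argsize. A hedged Go-level sketch of that selection
// (pickFrameClass is a hypothetical helper used only for illustration,
// not a runtime function):
//
//	func pickFrameClass(argsize uint32) uint32 {
//		for n := uint32(32); n <= 1<<30; n *= 2 {
//			if argsize <= n {
//				return n // dispatch to the matching runtime.call<n>
//			}
//		}
//		return 0 // nothing fits: runtime.badreflectcall
//	}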

#define DISPATCH(NAME,MAXSIZE)	\
	MOVD $MAXSIZE, R27;	\
	CMP R27, R16;		\
	BGT 3(PC);		\
	MOVD $NAME(SB), R27;	\
	B (R27)
// Note: can't just "B NAME(SB)" - bad inlining results.

TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
	MOVWU	argsize+24(FP), R16
	DISPATCH(runtime·call32, 32)
	DISPATCH(runtime·call64, 64)
	DISPATCH(runtime·call128, 128)
	DISPATCH(runtime·call256, 256)
	DISPATCH(runtime·call512, 512)
	DISPATCH(runtime·call1024, 1024)
	DISPATCH(runtime·call2048, 2048)
	DISPATCH(runtime·call4096, 4096)
	DISPATCH(runtime·call8192, 8192)
	DISPATCH(runtime·call16384, 16384)
	DISPATCH(runtime·call32768, 32768)
	DISPATCH(runtime·call65536, 65536)
	DISPATCH(runtime·call131072, 131072)
	DISPATCH(runtime·call262144, 262144)
	DISPATCH(runtime·call524288, 524288)
	DISPATCH(runtime·call1048576, 1048576)
	DISPATCH(runtime·call2097152, 2097152)
	DISPATCH(runtime·call4194304, 4194304)
	DISPATCH(runtime·call8388608, 8388608)
	DISPATCH(runtime·call16777216, 16777216)
	DISPATCH(runtime·call33554432, 33554432)
	DISPATCH(runtime·call67108864, 67108864)
	DISPATCH(runtime·call134217728, 134217728)
	DISPATCH(runtime·call268435456, 268435456)
	DISPATCH(runtime·call536870912, 536870912)
	DISPATCH(runtime·call1073741824, 1073741824)
	MOVD	$runtime·badreflectcall(SB), R0
	B	(R0)

#define CALLFN(NAME,MAXSIZE)			\
TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
	NO_LOCAL_POINTERS;			\
	/* copy arguments to stack */		\
	MOVD	arg+16(FP), R3;			\
	MOVWU	argsize+24(FP), R4;		\
	ADD	$8, RSP, R5;			\
	BIC	$0xf, R4, R6;			\
	CBZ	R6, 6(PC);			\
	/* if R6=(argsize&~15) != 0 */		\
	ADD	R6, R5, R6;			\
	/* copy 16 bytes a time */		\
	LDP.P	16(R3), (R7, R8);		\
	STP.P	(R7, R8), 16(R5);		\
	CMP	R5, R6;				\
	BNE	-3(PC);				\
	AND	$0xf, R4, R6;			\
	CBZ	R6, 6(PC);			\
	/* if R6=(argsize&15) != 0 */		\
	ADD	R6, R5, R6;			\
	/* copy 1 byte a time for the rest */	\
	MOVBU.P	1(R3), R7;			\
	MOVBU.P	R7, 1(R5);			\
	CMP	R5, R6;				\
	BNE	-3(PC);				\
	/* call function */			\
	MOVD	f+8(FP), R26;			\
	MOVD	(R26), R0;			\
	PCDATA	$PCDATA_StackMapIndex, $0;	\
	BL	(R0);				\
	/* copy return values back */		\
	MOVD	argtype+0(FP), R7;		\
	MOVD	arg+16(FP), R3;			\
	MOVWU	n+24(FP), R4;			\
	MOVWU	retoffset+28(FP), R6;		\
	ADD	$8, RSP, R5;			\
	ADD	R6, R5;				\
	ADD	R6, R3;				\
	SUB	R6, R4;				\
	BL	callRet<>(SB);			\
	RET

// callRet copies return values back at the end of call*. This is a
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $40-0
	MOVD	R7, 8(RSP)
	MOVD	R3, 16(RSP)
	MOVD	R5, 24(RSP)
	MOVD	R4, 32(RSP)
	BL	runtime·reflectcallmove(SB)
	RET

// These have 8 added to make the overall frame size a multiple of 16,
// as required by the ABI. (There is another +8 for the saved LR.)
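// For example, ·call128 below is declared with frame size 136 = 128 bytes of
// copied arguments + 8 bytes of padding; together with the 8-byte saved LR the
// full 144-byte frame stays a multiple of 16.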
CALLFN(·call32, 40 )
CALLFN(·call64, 72 )
CALLFN(·call128, 136 )
CALLFN(·call256, 264 )
CALLFN(·call512, 520 )
CALLFN(·call1024, 1032 )
CALLFN(·call2048, 2056 )
CALLFN(·call4096, 4104 )
CALLFN(·call8192, 8200 )
CALLFN(·call16384, 16392 )
CALLFN(·call32768, 32776 )
CALLFN(·call65536, 65544 )
CALLFN(·call131072, 131080 )
CALLFN(·call262144, 262152 )
CALLFN(·call524288, 524296 )
CALLFN(·call1048576, 1048584 )
CALLFN(·call2097152, 2097160 )
CALLFN(·call4194304, 4194312 )
CALLFN(·call8388608, 8388616 )
CALLFN(·call16777216, 16777224 )
CALLFN(·call33554432, 33554440 )
CALLFN(·call67108864, 67108872 )
CALLFN(·call134217728, 134217736 )
CALLFN(·call268435456, 268435464 )
CALLFN(·call536870912, 536870920 )
CALLFN(·call1073741824, 1073741832 )

// func aeshash32(p unsafe.Pointer, h uintptr) uintptr
TEXT runtime·aeshash32(SB),NOSPLIT|NOFRAME,$0-24
	MOVD	p+0(FP), R0
	MOVD	h+8(FP), R1
	MOVD	$ret+16(FP), R2
	MOVD	$runtime·aeskeysched+0(SB), R3

	VEOR	V0.B16, V0.B16, V0.B16
	VLD1	(R3), [V2.B16]
	VLD1	(R0), V0.S[1]
	VMOV	R1, V0.S[0]

	AESE	V2.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V2.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V2.B16, V0.B16

	VST1	[V0.D1], (R2)
	RET

// func aeshash64(p unsafe.Pointer, h uintptr) uintptr
TEXT runtime·aeshash64(SB),NOSPLIT|NOFRAME,$0-24
	MOVD	p+0(FP), R0
	MOVD	h+8(FP), R1
	MOVD	$ret+16(FP), R2
	MOVD	$runtime·aeskeysched+0(SB), R3

	VEOR	V0.B16, V0.B16, V0.B16
	VLD1	(R3), [V2.B16]
	VLD1	(R0), V0.D[1]
	VMOV	R1, V0.D[0]

	AESE	V2.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V2.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V2.B16, V0.B16

	VST1	[V0.D1], (R2)
	RET

// func aeshash(p unsafe.Pointer, h, size uintptr) uintptr
TEXT runtime·aeshash(SB),NOSPLIT|NOFRAME,$0-32
	MOVD	p+0(FP), R0
	MOVD	s+16(FP), R1
	MOVWU	h+8(FP), R3
	MOVD	$ret+24(FP), R2
	B	aeshashbody<>(SB)

// func aeshashstr(p unsafe.Pointer, h uintptr) uintptr
TEXT runtime·aeshashstr(SB),NOSPLIT|NOFRAME,$0-24
	MOVD	p+0(FP), R10	// string pointer
	LDP	(R10), (R0, R1)	// string data / length
	MOVWU	h+8(FP), R3
	MOVD	$ret+16(FP), R2	// return address
	B	aeshashbody<>(SB)

// R0: data
// R1: length (maximum 32 bits)
// R2: address to put return value
// R3: seed data
TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0
	VEOR	V30.B16, V30.B16, V30.B16
	VMOV	R3, V30.S[0]
	VMOV	R1, V30.S[1]	// load length into seed

	MOVD	$runtime·aeskeysched+0(SB), R4
	VLD1.P	16(R4), [V0.B16]
	AESE	V30.B16, V0.B16
	AESMC	V0.B16, V0.B16
	CMP	$16, R1
	BLO	aes0to15
	BEQ	aes16
	CMP	$32, R1
	BLS	aes17to32
	CMP	$64, R1
	BLS	aes33to64
	CMP	$128, R1
	BLS	aes65to128
	B	aes129plus

aes0to15:
	CMP	$0, R1
	BEQ	aes0
	VEOR	V2.B16, V2.B16, V2.B16
	TBZ	$3, R1, less_than_8
	VLD1.P	8(R0), V2.D[0]

less_than_8:
	TBZ	$2, R1, less_than_4
	VLD1.P	4(R0), V2.S[2]

less_than_4:
	TBZ	$1, R1, less_than_2
	VLD1.P	2(R0), V2.H[6]

less_than_2:
	TBZ	$0, R1, done
	VLD1	(R0), V2.B[14]
done:
	AESE	V0.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V0.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V0.B16, V2.B16

	VST1	[V2.D1], (R2)
	RET
aes0:
	VST1	[V0.D1], (R2)
	RET
aes16:
	VLD1	(R0), [V2.B16]
	B	done

aes17to32:
	// make second seed
	VLD1	(R4), [V1.B16]
	AESE	V30.B16, V1.B16
	AESMC	V1.B16, V1.B16
	SUB	$16, R1, R10
	VLD1.P	(R0)(R10), [V2.B16]
	VLD1	(R0), [V3.B16]

	AESE	V0.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V1.B16, V3.B16
	AESMC	V3.B16, V3.B16

	AESE	V0.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V1.B16, V3.B16
	AESMC	V3.B16, V3.B16

	AESE	V0.B16, V2.B16
	AESE	V1.B16, V3.B16

	VEOR	V3.B16, V2.B16, V2.B16
	VST1	[V2.D1], (R2)
	RET

aes33to64:
	VLD1	(R4), [V1.B16, V2.B16, V3.B16]
	AESE	V30.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V30.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V30.B16, V3.B16
	AESMC	V3.B16, V3.B16
	SUB	$32, R1, R10

	VLD1.P	(R0)(R10), [V4.B16, V5.B16]
	VLD1	(R0), [V6.B16, V7.B16]

	AESE	V0.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V1.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V2.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V3.B16, V7.B16
	AESMC	V7.B16, V7.B16

	AESE	V0.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V1.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V2.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V3.B16, V7.B16
	AESMC	V7.B16, V7.B16

	AESE	V0.B16, V4.B16
	AESE	V1.B16, V5.B16
	AESE	V2.B16, V6.B16
	AESE	V3.B16, V7.B16

	VEOR	V6.B16, V4.B16, V4.B16
	VEOR	V7.B16, V5.B16, V5.B16
	VEOR	V5.B16, V4.B16, V4.B16

	VST1	[V4.D1], (R2)
	RET

aes65to128:
	VLD1.P	64(R4), [V1.B16, V2.B16, V3.B16, V4.B16]
	VLD1	(R4), [V5.B16, V6.B16, V7.B16]
	AESE	V30.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V30.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V30.B16, V3.B16
	AESMC	V3.B16, V3.B16
	AESE	V30.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V30.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V30.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V30.B16, V7.B16
	AESMC	V7.B16, V7.B16

	SUB	$64, R1, R10
	VLD1.P	(R0)(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
	VLD1	(R0), [V12.B16, V13.B16, V14.B16, V15.B16]
	AESE	V0.B16, V8.B16
	AESMC	V8.B16, V8.B16
	AESE	V1.B16, V9.B16
	AESMC	V9.B16, V9.B16
	AESE	V2.B16, V10.B16
	AESMC	V10.B16, V10.B16
	AESE	V3.B16, V11.B16
	AESMC	V11.B16, V11.B16
	AESE	V4.B16, V12.B16
	AESMC	V12.B16, V12.B16
	AESE	V5.B16, V13.B16
	AESMC	V13.B16, V13.B16
	AESE	V6.B16, V14.B16
	AESMC	V14.B16, V14.B16
	AESE	V7.B16, V15.B16
	AESMC	V15.B16, V15.B16

	AESE	V0.B16, V8.B16
	AESMC	V8.B16, V8.B16
	AESE	V1.B16, V9.B16
	AESMC	V9.B16, V9.B16
	AESE	V2.B16, V10.B16
	AESMC	V10.B16, V10.B16
	AESE	V3.B16, V11.B16
	AESMC	V11.B16, V11.B16
	AESE	V4.B16, V12.B16
	AESMC	V12.B16, V12.B16
	AESE	V5.B16, V13.B16
	AESMC	V13.B16, V13.B16
	AESE	V6.B16, V14.B16
	AESMC	V14.B16, V14.B16
	AESE	V7.B16, V15.B16
	AESMC	V15.B16, V15.B16

	AESE	V0.B16, V8.B16
	AESE	V1.B16, V9.B16
	AESE	V2.B16, V10.B16
	AESE	V3.B16, V11.B16
	AESE	V4.B16, V12.B16
	AESE	V5.B16, V13.B16
	AESE	V6.B16, V14.B16
	AESE	V7.B16, V15.B16

	VEOR	V12.B16, V8.B16, V8.B16
	VEOR	V13.B16, V9.B16, V9.B16
	VEOR	V14.B16, V10.B16, V10.B16
	VEOR	V15.B16, V11.B16, V11.B16
	VEOR	V10.B16, V8.B16, V8.B16
	VEOR	V11.B16, V9.B16, V9.B16
	VEOR	V9.B16, V8.B16, V8.B16

	VST1	[V8.D1], (R2)
	RET

aes129plus:
	PRFM	(R0), PLDL1KEEP
	VLD1.P	64(R4), [V1.B16, V2.B16, V3.B16, V4.B16]
	VLD1	(R4), [V5.B16, V6.B16, V7.B16]
	AESE	V30.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V30.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V30.B16, V3.B16
	AESMC	V3.B16, V3.B16
	AESE	V30.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V30.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V30.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V30.B16, V7.B16
	AESMC	V7.B16, V7.B16
	ADD	R0, R1, R10
	SUB	$128, R10, R10
	VLD1.P	64(R10), [V8.B16, V9.B16, V10.B16, V11.B16]
	VLD1	(R10), [V12.B16, V13.B16, V14.B16, V15.B16]
	SUB	$1, R1, R1
	LSR	$7, R1, R1

aesloop:
	AESE	V8.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V9.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V10.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V11.B16, V3.B16
	AESMC	V3.B16, V3.B16
	AESE	V12.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V13.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V14.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V15.B16, V7.B16
	AESMC	V7.B16, V7.B16

	VLD1.P	64(R0), [V8.B16, V9.B16, V10.B16, V11.B16]
	AESE	V8.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V9.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V10.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V11.B16, V3.B16
	AESMC	V3.B16, V3.B16

	VLD1.P	64(R0), [V12.B16, V13.B16, V14.B16, V15.B16]
	AESE	V12.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V13.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V14.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V15.B16, V7.B16
	AESMC	V7.B16, V7.B16
	SUB	$1, R1, R1
	CBNZ	R1, aesloop

	AESE	V8.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V9.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V10.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V11.B16, V3.B16
	AESMC	V3.B16, V3.B16
	AESE	V12.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V13.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V14.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V15.B16, V7.B16
	AESMC	V7.B16, V7.B16

	AESE	V8.B16, V0.B16
	AESMC	V0.B16, V0.B16
	AESE	V9.B16, V1.B16
	AESMC	V1.B16, V1.B16
	AESE	V10.B16, V2.B16
	AESMC	V2.B16, V2.B16
	AESE	V11.B16, V3.B16
	AESMC	V3.B16, V3.B16
	AESE	V12.B16, V4.B16
	AESMC	V4.B16, V4.B16
	AESE	V13.B16, V5.B16
	AESMC	V5.B16, V5.B16
	AESE	V14.B16, V6.B16
	AESMC	V6.B16, V6.B16
	AESE	V15.B16, V7.B16
	AESMC	V7.B16, V7.B16

	AESE	V8.B16, V0.B16
	AESE	V9.B16, V1.B16
	AESE	V10.B16, V2.B16
	AESE	V11.B16, V3.B16
	AESE	V12.B16, V4.B16
	AESE	V13.B16, V5.B16
	AESE	V14.B16, V6.B16
	AESE	V15.B16, V7.B16

	VEOR	V0.B16, V1.B16, V0.B16
	VEOR	V2.B16, V3.B16, V2.B16
	VEOR	V4.B16, V5.B16, V4.B16
	VEOR	V6.B16, V7.B16, V6.B16
	VEOR	V0.B16, V2.B16, V0.B16
	VEOR	V4.B16, V6.B16, V4.B16
	VEOR	V4.B16, V0.B16, V0.B16

	VST1	[V0.D1], (R2)
	RET

TEXT runtime·procyield(SB),NOSPLIT,$0-0
	MOVWU	cycles+0(FP), R0
again:
	YIELD
	SUBW	$1, R0
	CBNZ	R0, again
	RET

// void jmpdefer(fv, sp);
// called from deferreturn.
// 1. grab stored LR for caller
// 2. sub 4 bytes to get back to BL deferreturn
// 3. BR to fn
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOVD	0(RSP), R0
	SUB	$4, R0
	MOVD	R0, LR

	MOVD	fv+0(FP), R26
	MOVD	argp+8(FP), R0
	MOVD	R0, RSP
	SUB	$8, RSP
	MOVD	0(R26), R3
	B	(R3)

// Save state of caller into g->sched. Smashes R0.
TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
	MOVD	LR, (g_sched+gobuf_pc)(g)
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(g)
	MOVD	R29, (g_sched+gobuf_bp)(g)
	MOVD	$0, (g_sched+gobuf_lr)(g)
	MOVD	$0, (g_sched+gobuf_ret)(g)
	// Assert ctxt is zero. See func save.
	MOVD	(g_sched+gobuf_ctxt)(g), R0
	CMP	$0, R0
	BEQ	2(PC)
	CALL	runtime·badctxt(SB)
	RET

// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
// See cgocall.go for more details.
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
	MOVD	fn+0(FP), R1
	MOVD	arg+8(FP), R0

	MOVD	RSP, R2		// save original stack pointer
	CMP	$0, g
	BEQ	nosave
	MOVD	g, R4

	// Figure out if we need to switch to m->g0 stack.
	// We get called to create new OS threads too, and those
	// come in on the m->g0 stack already.
	MOVD	g_m(g), R8
	MOVD	m_gsignal(R8), R3
	CMP	R3, g
	BEQ	nosave
	MOVD	m_g0(R8), R3
	CMP	R3, g
	BEQ	nosave

	// Switch to system stack.
	MOVD	R0, R9	// gosave<> and save_g might clobber R0
	BL	gosave<>(SB)
	MOVD	R3, g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R0
	MOVD	R0, RSP
	MOVD	(g_sched+gobuf_bp)(g), R29
	MOVD	R9, R0

	// Now on a scheduling stack (a pthread-created stack).
	// Save room for two of our pointers /*, plus 32 bytes of callee
	// save area that lives on the caller stack. */
	MOVD	RSP, R13
	SUB	$16, R13
	MOVD	R13, RSP
	MOVD	R4, 0(RSP)	// save old g on stack
	MOVD	(g_stack+stack_hi)(R4), R4
	SUB	R2, R4
	MOVD	R4, 8(RSP)	// save depth in old g stack (can't just save SP, as stack might be copied during a callback)
	BL	(R1)
	MOVD	R0, R9

	// Restore g, stack pointer. R0 is errno, so don't touch it
	MOVD	0(RSP), g
	BL	runtime·save_g(SB)
	MOVD	(g_stack+stack_hi)(g), R5
	MOVD	8(RSP), R6
	SUB	R6, R5
	MOVD	R9, R0
	MOVD	R5, RSP

	MOVW	R0, ret+16(FP)
	RET

nosave:
	// Running on a system stack, perhaps even without a g.
	// Having no g can happen during thread creation or thread teardown
	// (see needm/dropm on Solaris, for example).
	// This code is like the above sequence but without saving/restoring g
	// and without worrying about the stack moving out from under us
	// (because we're on a system stack, not a goroutine stack).
	// The above code could be used directly if already on a system stack,
	// but then the only path through this code would be a rare case on Solaris.
	// Using this code for all "already on system stack" calls exercises it more,
	// which should help keep it correct.
	MOVD	RSP, R13
	SUB	$16, R13
	MOVD	R13, RSP
	MOVD	$0, R4
	MOVD	R4, 0(RSP)	// Where above code stores g, in case someone looks during debugging.
	MOVD	R2, 8(RSP)	// Save original stack pointer.
	BL	(R1)
	// Restore stack pointer.
	MOVD	8(RSP), R2
	MOVD	R2, RSP
	MOVD	R0, ret+16(FP)
	RET

// cgocallback(void (*fn)(void*), void *frame, uintptr framesize, uintptr ctxt)
// Turn the fn into a Go func (by taking its address) and call
// cgocallback_gofunc.
TEXT runtime·cgocallback(SB),NOSPLIT,$40-32
	MOVD	$fn+0(FP), R0
	MOVD	R0, 8(RSP)
	MOVD	frame+8(FP), R0
	MOVD	R0, 16(RSP)
	MOVD	framesize+16(FP), R0
	MOVD	R0, 24(RSP)
	MOVD	ctxt+24(FP), R0
	MOVD	R0, 32(RSP)
	MOVD	$runtime·cgocallback_gofunc(SB), R0
	BL	(R0)
	RET

// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize, uintptr ctxt)
// See cgocall.go for more details.
TEXT ·cgocallback_gofunc(SB),NOSPLIT,$24-32
	NO_LOCAL_POINTERS

	// Load g from thread-local storage.
	MOVB	runtime·iscgo(SB), R3
	CMP	$0, R3
	BEQ	nocgo
	BL	runtime·load_g(SB)
nocgo:

	// If g is nil, Go did not create the current thread.
	// Call needm to obtain one for temporary use.
	// In this case, we're running on the thread stack, so there's
	// lots of space, but the linker doesn't know. Hide the call from
	// the linker analysis by using an indirect call.
	CMP	$0, g
	BEQ	needm

	MOVD	g_m(g), R8
	MOVD	R8, savedm-8(SP)
	B	havem

needm:
	MOVD	g, savedm-8(SP) // g is zero, so is m.
	MOVD	$runtime·needm(SB), R0
	BL	(R0)

	// Set m->sched.sp = SP, so that if a panic happens
	// during the function we are about to execute, it will
	// have a valid SP to run on the g0 stack.
	// The next few lines (after the havem label)
	// will save this SP onto the stack and then write
	// the same SP back to m->sched.sp. That seems redundant,
	// but if an unrecovered panic happens, unwindm will
	// restore the g->sched.sp from the stack location
	// and then systemstack will try to use it. If we don't set it here,
	// that restored SP will be uninitialized (typically 0) and
	// will not be usable.
	MOVD	g_m(g), R8
	MOVD	m_g0(R8), R3
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(R3)
	MOVD	R29, (g_sched+gobuf_bp)(R3)

havem:
	// Now there's a valid m, and we're running on its m->g0.
	// Save current m->g0->sched.sp on stack and then set it to SP.
	// Save current sp in m->g0->sched.sp in preparation for
	// switch back to m->curg stack.
	// NOTE: unwindm knows that the saved g->sched.sp is at 16(RSP) aka savedsp-16(SP).
	// Beware that the frame size is actually 32+16.
	MOVD	m_g0(R8), R3
	MOVD	(g_sched+gobuf_sp)(R3), R4
	MOVD	R4, savedsp-16(SP)
	MOVD	RSP, R0
	MOVD	R0, (g_sched+gobuf_sp)(R3)

	// Switch to m->curg stack and call runtime.cgocallbackg.
	// Because we are taking over the execution of m->curg
	// but *not* resuming what had been running, we need to
	// save that information (m->curg->sched) so we can restore it.
	// We can restore m->curg->sched.sp easily, because calling
	// runtime.cgocallbackg leaves SP unchanged upon return.
	// To save m->curg->sched.pc, we push it onto the stack.
	// This has the added benefit that it looks to the traceback
	// routine like cgocallbackg is going to return to that
	// PC (because the frame we allocate below has the same
	// size as cgocallback_gofunc's frame declared above)
	// so that the traceback will seamlessly trace back into
	// the earlier calls.
	//
	// In the new goroutine, -8(SP) is unused (where SP refers to
	// m->curg's SP while we're setting it up, before we've adjusted it).
	MOVD	m_curg(R8), g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R4 // prepare stack as R4
	MOVD	(g_sched+gobuf_pc)(g), R5
	MOVD	R5, -48(R4)
	MOVD	(g_sched+gobuf_bp)(g), R5
	MOVD	R5, -56(R4)
	MOVD	ctxt+24(FP), R0
	MOVD	R0, -40(R4)
	MOVD	$-48(R4), R0 // maintain 16-byte SP alignment
	MOVD	R0, RSP
	BL	runtime·cgocallbackg(SB)

	// Restore g->sched (== m->curg->sched) from saved values.
	MOVD	0(RSP), R5
	MOVD	R5, (g_sched+gobuf_pc)(g)
	MOVD	RSP, R4
	ADD	$48, R4, R4
	MOVD	R4, (g_sched+gobuf_sp)(g)

	// Switch back to m->g0's stack and restore m->g0->sched.sp.
	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
	// so we do not have to restore it.)
	MOVD	g_m(g), R8
	MOVD	m_g0(R8), g
	BL	runtime·save_g(SB)
	MOVD	(g_sched+gobuf_sp)(g), R0
	MOVD	R0, RSP
	MOVD	savedsp-16(SP), R4
	MOVD	R4, (g_sched+gobuf_sp)(g)

	// If the m on entry was nil, we called needm above to borrow an m
	// for the duration of the call. Since the call is over, return it with dropm.
	MOVD	savedm-8(SP), R6
	CMP	$0, R6
	BNE	droppedm
	MOVD	$runtime·dropm(SB), R0
	BL	(R0)
droppedm:

	// Done!
	RET

// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$24
	// g (R28) and REGTMP (R27) might be clobbered by load_g. They
	// are callee-save in the gcc calling convention, so save them.
	MOVD	R27, savedR27-8(SP)
	MOVD	g, saveG-16(SP)

	BL	runtime·load_g(SB)
	MOVD	g_m(g), R0
	MOVD	m_curg(R0), R0
	MOVD	(g_stack+stack_hi)(R0), R0

	MOVD	saveG-16(SP), g
	MOVD	savedR28-8(SP), R27
	RET

// void setg(G*); set g. for use by needm.
TEXT runtime·setg(SB), NOSPLIT, $0-8
	MOVD	gg+0(FP), g
	// This only happens if iscgo, so jump straight to save_g
	BL	runtime·save_g(SB)
	RET

// void setg_gcc(G*); set g called from gcc
TEXT setg_gcc<>(SB),NOSPLIT,$8
	MOVD	R0, g
	MOVD	R27, savedR27-8(SP)
	BL	runtime·save_g(SB)
	MOVD	savedR27-8(SP), R27
	RET

TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
	MOVD	ZR, R0
	MOVD	(R0), R0
	UNDEF

TEXT runtime·return0(SB), NOSPLIT, $0
	MOVW	$0, R0
	RET

// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT|NOFRAME,$0-0
	MOVD	R0, R0	// NOP
	BL	runtime·goexit1(SB)	// does not return

TEXT runtime·sigreturn(SB),NOSPLIT,$0-0
	RET

// This is called from .init_array and follows the platform, not Go, ABI.
TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
	SUB	$0x10, RSP
	MOVD	R27, 8(RSP) // The access to global variables below implicitly uses R27, which is callee-save
	MOVD	runtime·lastmoduledatap(SB), R1
	MOVD	R0, moduledata_next(R1)
	MOVD	R0, runtime·lastmoduledatap(SB)
	MOVD	8(RSP), R27
	ADD	$0x10, RSP
	RET

TEXT ·checkASM(SB),NOSPLIT,$0-1
	MOVW	$1, R3
	MOVB	R3, ret+0(FP)
	RET

// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
// - R2 is the destination of the write
// - R3 is the value being written at R2
// It clobbers condition codes.
// It does not clobber any general-purpose registers,
// but may clobber others (e.g., floating point registers)
// The act of CALLing gcWriteBarrier will clobber R30 (LR).
TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$216
	// Save the registers clobbered by the fast path.
	MOVD	R0, 200(RSP)
	MOVD	R1, 208(RSP)
	MOVD	g_m(g), R0
	MOVD	m_p(R0), R0
	MOVD	(p_wbBuf+wbBuf_next)(R0), R1
	// Increment wbBuf.next position.
	ADD	$16, R1
	MOVD	R1, (p_wbBuf+wbBuf_next)(R0)
	MOVD	(p_wbBuf+wbBuf_end)(R0), R0
	CMP	R1, R0
	// Record the write.
	MOVD	R3, -16(R1)	// Record value
	MOVD	(R2), R0	// TODO: This turns bad writes into bad reads.
	MOVD	R0, -8(R1)	// Record *slot
	// Is the buffer full? (flags set in CMP above)
	BEQ	flush
ret:
	MOVD	200(RSP), R0
	MOVD	208(RSP), R1
	// Do the write.
	MOVD	R3, (R2)
	RET

flush:
	// Save all general purpose registers since these could be
	// clobbered by wbBufFlush and were not saved by the caller.
	MOVD	R2, 8(RSP)	// Also first argument to wbBufFlush
	MOVD	R3, 16(RSP)	// Also second argument to wbBufFlush
	// R0 already saved
	// R1 already saved
	MOVD	R4, 24(RSP)
	MOVD	R5, 32(RSP)
	MOVD	R6, 40(RSP)
	MOVD	R7, 48(RSP)
	MOVD	R8, 56(RSP)
	MOVD	R9, 64(RSP)
	MOVD	R10, 72(RSP)
	MOVD	R11, 80(RSP)
	MOVD	R12, 88(RSP)
	MOVD	R13, 96(RSP)
	MOVD	R14, 104(RSP)
	MOVD	R15, 112(RSP)
	MOVD	R16, 120(RSP)
	MOVD	R17, 128(RSP)
	// R18 is unused.
	MOVD	R19, 136(RSP)
	MOVD	R20, 144(RSP)
	MOVD	R21, 152(RSP)
	MOVD	R22, 160(RSP)
	MOVD	R23, 168(RSP)
	MOVD	R24, 176(RSP)
	MOVD	R25, 184(RSP)
	MOVD	R26, 192(RSP)
	// R27 is temp register.
	// R28 is g.
	// R29 is frame pointer (unused).
	// R30 is LR, which was saved by the prologue.
	// R31 is SP.

	// This takes arguments R2 and R3.
	CALL	runtime·wbBufFlush(SB)

	MOVD	8(RSP), R2
	MOVD	16(RSP), R3
	MOVD	24(RSP), R4
	MOVD	32(RSP), R5
	MOVD	40(RSP), R6
	MOVD	48(RSP), R7
	MOVD	56(RSP), R8
	MOVD	64(RSP), R9
	MOVD	72(RSP), R10
	MOVD	80(RSP), R11
	MOVD	88(RSP), R12
	MOVD	96(RSP), R13
	MOVD	104(RSP), R14
	MOVD	112(RSP), R15
	MOVD	120(RSP), R16
	MOVD	128(RSP), R17
	MOVD	136(RSP), R19
	MOVD	144(RSP), R20
	MOVD	152(RSP), R21
	MOVD	160(RSP), R22
	MOVD	168(RSP), R23
	MOVD	176(RSP), R24
	MOVD	184(RSP), R25
	MOVD	192(RSP), R26
	JMP	ret
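
// A rough Go-level sketch of the gcWriteBarrier fast path above. Field and
// function names are approximate and the body is illustrative pseudocode, not
// the actual runtime implementation:
//
//	func gcWriteBarrier(slot *uintptr, value uintptr) {
//		buf := &getg().m.p.ptr().wbBuf
//		buf.next += 16              // reserve one two-word record
//		rec := buf.next
//		*(rec - 16) = value         // record the pointer being installed
//		*(rec - 8) = *slot          // record the pointer being overwritten
//		if buf.next == buf.end {
//			wbBufFlush(slot, value) // slow path: spill registers, drain buffer
//		}
//		*slot = value               // finally perform the write itself
//	}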