github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/asm_riscv64.s

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "go_asm.h"
#include "funcdata.h"
#include "textflag.h"

// func rt0_go()
TEXT runtime·rt0_go(SB),NOSPLIT,$0
	// X2 = stack; A0 = argc; A1 = argv
	ADD	$-24, X2
	MOV	A0, 8(X2)	// argc
	MOV	A1, 16(X2)	// argv

	// create istack out of the given (operating system) stack.
	// _cgo_init may update stackguard.
	MOV	$runtime·g0(SB), g
	MOV	$(-64*1024), T0
	ADD	T0, X2, T1
	MOV	T1, g_stackguard0(g)
	MOV	T1, g_stackguard1(g)
	MOV	T1, (g_stack+stack_lo)(g)
	MOV	X2, (g_stack+stack_hi)(g)

	// if there is a _cgo_init, call it using the gcc ABI.
	MOV	_cgo_init(SB), T0
	BEQ	T0, ZERO, nocgo

	MOV	ZERO, A3	// arg 3: not used
	MOV	ZERO, A2	// arg 2: not used
	MOV	$setg_gcc<>(SB), A1	// arg 1: setg
	MOV	g, A0	// arg 0: G
	JALR	RA, T0

nocgo:
	// update stackguard after _cgo_init
	MOV	(g_stack+stack_lo)(g), T0
	ADD	$const__StackGuard, T0
	MOV	T0, g_stackguard0(g)
	MOV	T0, g_stackguard1(g)

	// set the per-goroutine and per-mach "registers"
	MOV	$runtime·m0(SB), T0

	// save m->g0 = g0
	MOV	g, m_g0(T0)
	// save m0 to g0->m
	MOV	T0, g_m(g)

	CALL	runtime·check(SB)

	// args are already prepared
	CALL	runtime·args(SB)
	CALL	runtime·osinit(SB)
	CALL	runtime·schedinit(SB)

	// create a new goroutine to start program
	MOV	$runtime·mainPC(SB), T0	// entry
	ADD	$-24, X2
	MOV	T0, 16(X2)
	MOV	ZERO, 8(X2)
	MOV	ZERO, 0(X2)
	CALL	runtime·newproc(SB)
	ADD	$24, X2

	// start this M
	CALL	runtime·mstart(SB)

	WORD $0	// crash if reached
	RET

// void setg_gcc(G*); set g called from gcc with g in A0
TEXT setg_gcc<>(SB),NOSPLIT,$0-0
	MOV	A0, g
	CALL	runtime·save_g(SB)
	RET

// func cputicks() int64
TEXT runtime·cputicks(SB),NOSPLIT,$0-8
	RDTIME	A0
	MOV	A0, ret+0(FP)
	RET

// systemstack_switch is a dummy routine that systemstack leaves at the bottom
// of the G stack. We need to distinguish the routine that
// lives at the bottom of the G stack from the one that lives
// at the top of the system stack because the one at the top of
// the system stack terminates the stack walk (see topofstack()).
TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
	UNDEF
	JALR	RA, ZERO	// make sure this function is not leaf
	RET

// func systemstack(fn func())
TEXT runtime·systemstack(SB), NOSPLIT, $0-8
	MOV	fn+0(FP), CTXT	// CTXT = fn
	MOV	g_m(g), T0	// T0 = m

	MOV	m_gsignal(T0), T1	// T1 = gsignal
	BEQ	g, T1, noswitch

	MOV	m_g0(T0), T1	// T1 = g0
	BEQ	g, T1, noswitch

	MOV	m_curg(T0), T2
	BEQ	g, T2, switch

	// Bad: g is not gsignal, not g0, not curg. What is it?
	// Hide call from linker nosplit analysis.
	MOV	$runtime·badsystemstack(SB), T1
	JALR	RA, T1

switch:
	// save our state in g->sched. Pretend to
	// be systemstack_switch if the G stack is scanned.
	MOV	$runtime·systemstack_switch(SB), T2
	ADD	$8, T2	// get past prologue
	MOV	T2, (g_sched+gobuf_pc)(g)
	MOV	X2, (g_sched+gobuf_sp)(g)
	MOV	ZERO, (g_sched+gobuf_lr)(g)
	MOV	g, (g_sched+gobuf_g)(g)

	// switch to g0
	MOV	T1, g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), T0
	// make it look like mstart called systemstack on g0, to stop traceback
	ADD	$-8, T0
	MOV	$runtime·mstart(SB), T1
	MOV	T1, 0(T0)
	MOV	T0, X2

	// call target function
	MOV	0(CTXT), T1	// code pointer
	JALR	RA, T1

	// switch back to g
	MOV	g_m(g), T0
	MOV	m_curg(T0), g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), X2
	MOV	ZERO, (g_sched+gobuf_sp)(g)
	RET

noswitch:
	// already on m stack, just call directly
	// Using a tail call here cleans up tracebacks since we won't stop
	// at an intermediate systemstack.
	MOV	0(CTXT), T1	// code pointer
	ADD	$8, X2
	JMP	(T1)

TEXT runtime·getcallerpc(SB),NOSPLIT|NOFRAME,$0-8
	MOV	0(X2), T0	// LR saved by caller
	MOV	T0, ret+0(FP)
	RET

/*
 * support for morestack
 */

// Called during function prolog when more stack is needed.
// Caller has already loaded:
// R1: framesize, R2: argsize, R3: LR
//
// The traceback routines see morestack on a g0 as being
// the top of a stack (for example, morestack calling newstack
// calling the scheduler calling newm calling gc), so we must
// record an argument size. For that purpose, it has no arguments.

// func morestack()
TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
	// Cannot grow scheduler stack (m->g0).
	MOV	g_m(g), A0
	MOV	m_g0(A0), A1
	BNE	g, A1, 3(PC)
	CALL	runtime·badmorestackg0(SB)
	CALL	runtime·abort(SB)

	// Cannot grow signal stack (m->gsignal).
	MOV	m_gsignal(A0), A1
	BNE	g, A1, 3(PC)
	CALL	runtime·badmorestackgsignal(SB)
	CALL	runtime·abort(SB)

	// Called from f.
	// Set g->sched to context in f.
	MOV	X2, (g_sched+gobuf_sp)(g)
	MOV	T0, (g_sched+gobuf_pc)(g)
	MOV	RA, (g_sched+gobuf_lr)(g)
	MOV	CTXT, (g_sched+gobuf_ctxt)(g)

	// Called from f.
	// Set m->morebuf to f's caller.
	MOV	RA, (m_morebuf+gobuf_pc)(A0)	// f's caller's PC
	MOV	X2, (m_morebuf+gobuf_sp)(A0)	// f's caller's SP
	MOV	g, (m_morebuf+gobuf_g)(A0)

	// Call newstack on m->g0's stack.
	MOV	m_g0(A0), g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), X2
	// Create a stack frame on g0 to call newstack.
	MOV	ZERO, -8(X2)	// Zero saved LR in frame
	ADD	$-8, X2
	CALL	runtime·newstack(SB)

	// Not reached, but make sure the return PC from the call to newstack
	// is still in this function, and not the beginning of the next.
	UNDEF

// func morestack_noctxt()
TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
	MOV	ZERO, CTXT
	JMP	runtime·morestack(SB)

// AES hashing not implemented for riscv64
TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32
	JMP	runtime·memhashFallback(SB)
TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24
	JMP	runtime·strhashFallback(SB)
TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24
	JMP	runtime·memhash32Fallback(SB)
TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24
	JMP	runtime·memhash64Fallback(SB)

// func return0()
TEXT runtime·return0(SB), NOSPLIT, $0
	MOV	$0, A0
	RET

// restore state from Gobuf; longjmp

// func gogo(buf *gobuf)
TEXT runtime·gogo(SB), NOSPLIT, $16-8
	MOV	buf+0(FP), T0
	MOV	gobuf_g(T0), g	// make sure g is not nil
	CALL	runtime·save_g(SB)

	MOV	(g), ZERO	// make sure g is not nil
	MOV	gobuf_sp(T0), X2
	MOV	gobuf_lr(T0), RA
	MOV	gobuf_ret(T0), A0
	MOV	gobuf_ctxt(T0), CTXT
	MOV	ZERO, gobuf_sp(T0)
	MOV	ZERO, gobuf_ret(T0)
	MOV	ZERO, gobuf_lr(T0)
	MOV	ZERO, gobuf_ctxt(T0)
	MOV	gobuf_pc(T0), T0
	JALR	ZERO, T0

// func jmpdefer(fv *funcval, argp uintptr)
// called from deferreturn
// 1. grab stored return address from the caller's frame
// 2. sub 8 bytes to get back to JAL deferreturn
// 3. JMP to fn
TEXT runtime·jmpdefer(SB), NOSPLIT|NOFRAME, $0-16
	MOV	0(X2), RA
	ADD	$-8, RA

	MOV	fv+0(FP), CTXT
	MOV	argp+8(FP), X2
	ADD	$-8, X2
	MOV	0(CTXT), T0
	JALR	ZERO, T0

// func procyield(cycles uint32)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
	RET

// Switch to m->g0's stack, call fn(g).
// Fn must never return. It should gogo(&g->sched)
// to keep running g.

// func mcall(fn func(*g))
TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8
	// Save caller state in g->sched
	MOV	X2, (g_sched+gobuf_sp)(g)
	MOV	RA, (g_sched+gobuf_pc)(g)
	MOV	ZERO, (g_sched+gobuf_lr)(g)
	MOV	g, (g_sched+gobuf_g)(g)

	// Switch to m->g0 & its stack, call fn.
	MOV	g, T0
	MOV	g_m(g), T1
	MOV	m_g0(T1), g
	CALL	runtime·save_g(SB)
	BNE	g, T0, 2(PC)
	JMP	runtime·badmcall(SB)
	MOV	fn+0(FP), CTXT	// context
	MOV	0(CTXT), T1	// code pointer
	MOV	(g_sched+gobuf_sp)(g), X2	// sp = m->g0->sched.sp
	ADD	$-16, X2
	MOV	T0, 8(X2)
	MOV	ZERO, 0(X2)
	JALR	RA, T1
	JMP	runtime·badmcall2(SB)

// func gosave(buf *gobuf)
// save state in Gobuf; setjmp
TEXT runtime·gosave(SB), NOSPLIT|NOFRAME, $0-8
	MOV	buf+0(FP), T1
	MOV	X2, gobuf_sp(T1)
	MOV	RA, gobuf_pc(T1)
	MOV	g, gobuf_g(T1)
	MOV	ZERO, gobuf_lr(T1)
	MOV	ZERO, gobuf_ret(T1)
	// Assert ctxt is zero. See func save.
	MOV	gobuf_ctxt(T1), T1
	BEQ	T1, ZERO, 2(PC)
	CALL	runtime·badctxt(SB)
	RET

// Save state of caller into g->sched. Smashes X31.
TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
	MOV	X1, (g_sched+gobuf_pc)(g)
	MOV	X2, (g_sched+gobuf_sp)(g)
	MOV	ZERO, (g_sched+gobuf_lr)(g)
	MOV	ZERO, (g_sched+gobuf_ret)(g)
	// Assert ctxt is zero. See func save.
	MOV	(g_sched+gobuf_ctxt)(g), X31
	BEQ	ZERO, X31, 2(PC)
	CALL	runtime·badctxt(SB)
	RET

// func asmcgocall(fn, arg unsafe.Pointer) int32
// Call fn(arg) on the scheduler stack,
// aligned appropriately for the gcc ABI.
// See cgocall.go for more details.
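//
// A note on register use in the implementation below (summarized from the
// code, not part of the original comment): X5 holds fn and X10 holds arg,
// which doubles as the first C argument/result register; X8 and X9 carry the
// original SP and g across the switch to the g0 stack. The value saved at
// 8(X2) is the depth below stack.hi rather than the raw SP, since the
// goroutine stack may be moved while a cgo callback runs.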
TEXT ·asmcgocall(SB),NOSPLIT,$0-20
	MOV	fn+0(FP), X5
	MOV	arg+8(FP), X10

	MOV	X2, X8	// save original stack pointer
	MOV	g, X9

	// Figure out if we need to switch to m->g0 stack.
	// We get called to create new OS threads too, and those
	// come in on the m->g0 stack already.
	MOV	g_m(g), X6
	MOV	m_g0(X6), X7
	BEQ	X7, g, g0

	CALL	gosave<>(SB)
	MOV	X7, g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), X2

	// Now on a scheduling stack (a pthread-created stack).
g0:
	// Save room for two of our pointers.
	ADD	$-16, X2
	MOV	X9, 0(X2)	// save old g on stack
	MOV	(g_stack+stack_hi)(X9), X9
	SUB	X8, X9, X8
	MOV	X8, 8(X2)	// save depth in old g stack (can't just save SP, as stack might be copied during a callback)

	JALR	RA, (X5)

	// Restore g, stack pointer. X10 is return value.
	MOV	0(X2), g
	CALL	runtime·save_g(SB)
	MOV	(g_stack+stack_hi)(g), X5
	MOV	8(X2), X6
	SUB	X6, X5, X6
	MOV	X6, X2

	MOVW	X10, ret+16(FP)
	RET

// func asminit()
TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
	RET

// reflectcall: call a function with the given argument list
// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
// we don't have variable-sized frames, so we use a small number
// of constant-sized-frame functions to encode a few bits of size in the pc.
// Caution: ugly multiline assembly macros in your future!

#define DISPATCH(NAME,MAXSIZE)	\
	MOV	$MAXSIZE, T1	\
	BLTU	T1, T0, 3(PC)	\
	MOV	$NAME(SB), T2;	\
	JALR	ZERO, T2
// Note: can't just "BR NAME(SB)" - bad inlining results.

// func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset uint32)
TEXT reflect·call(SB), NOSPLIT, $0-0
	JMP	·reflectcall(SB)

// func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-32
	MOVWU	argsize+24(FP), T0
	DISPATCH(runtime·call16, 16)
	DISPATCH(runtime·call32, 32)
	DISPATCH(runtime·call64, 64)
	DISPATCH(runtime·call128, 128)
	DISPATCH(runtime·call256, 256)
	DISPATCH(runtime·call512, 512)
	DISPATCH(runtime·call1024, 1024)
	DISPATCH(runtime·call2048, 2048)
	DISPATCH(runtime·call4096, 4096)
	DISPATCH(runtime·call8192, 8192)
	DISPATCH(runtime·call16384, 16384)
	DISPATCH(runtime·call32768, 32768)
	DISPATCH(runtime·call65536, 65536)
	DISPATCH(runtime·call131072, 131072)
	DISPATCH(runtime·call262144, 262144)
	DISPATCH(runtime·call524288, 524288)
	DISPATCH(runtime·call1048576, 1048576)
	DISPATCH(runtime·call2097152, 2097152)
	DISPATCH(runtime·call4194304, 4194304)
	DISPATCH(runtime·call8388608, 8388608)
	DISPATCH(runtime·call16777216, 16777216)
	DISPATCH(runtime·call33554432, 33554432)
	DISPATCH(runtime·call67108864, 67108864)
	DISPATCH(runtime·call134217728, 134217728)
	DISPATCH(runtime·call268435456, 268435456)
	DISPATCH(runtime·call536870912, 536870912)
	DISPATCH(runtime·call1073741824, 1073741824)
	MOV	$runtime·badreflectcall(SB), T2
	JALR	ZERO, T2

#define CALLFN(NAME,MAXSIZE)			\
TEXT NAME(SB), WRAPPER, $MAXSIZE-24;		\
	NO_LOCAL_POINTERS;			\
	/* copy arguments to stack */		\
	MOV	arg+16(FP), A1;			\
	MOVWU	argsize+24(FP), A2;		\
	MOV	X2, A3;				\
	ADD	$8, A3;				\
	ADD	A3, A2;				\
	BEQ	A3, A2, 6(PC);			\
	MOVBU	(A1), A4;			\
	ADD	$1, A1;				\
	MOVB	A4, (A3);			\
	ADD	$1, A3;				\
	JMP	-5(PC);				\
	/* call function */			\
	MOV	f+8(FP), CTXT;			\
	MOV	(CTXT), A4;			\
	PCDATA	$PCDATA_StackMapIndex, $0;	\
	JALR	RA, A4;				\
	/* copy return values back */		\
	MOV	argtype+0(FP), A5;		\
	MOV	arg+16(FP), A1;			\
	MOVWU	n+24(FP), A2;			\
	MOVWU	retoffset+28(FP), A4;		\
	ADD	$8, X2, A3;			\
	ADD	A4, A3;				\
	ADD	A4, A1;				\
	SUB	A4, A2;				\
	CALL	callRet<>(SB);			\
	RET

// callRet copies return values back at the end of call*. This is a
// separate function so it can allocate stack space for the arguments
// to reflectcallmove. It does not follow the Go ABI; it expects its
// arguments in registers.
TEXT callRet<>(SB), NOSPLIT, $32-0
	MOV	A5, 8(X2)
	MOV	A1, 16(X2)
	MOV	A3, 24(X2)
	MOV	A2, 32(X2)
	CALL	runtime·reflectcallmove(SB)
	RET

CALLFN(·call16, 16)
CALLFN(·call32, 32)
CALLFN(·call64, 64)
CALLFN(·call128, 128)
CALLFN(·call256, 256)
CALLFN(·call512, 512)
CALLFN(·call1024, 1024)
CALLFN(·call2048, 2048)
CALLFN(·call4096, 4096)
CALLFN(·call8192, 8192)
CALLFN(·call16384, 16384)
CALLFN(·call32768, 32768)
CALLFN(·call65536, 65536)
CALLFN(·call131072, 131072)
CALLFN(·call262144, 262144)
CALLFN(·call524288, 524288)
CALLFN(·call1048576, 1048576)
CALLFN(·call2097152, 2097152)
CALLFN(·call4194304, 4194304)
CALLFN(·call8388608, 8388608)
CALLFN(·call16777216, 16777216)
CALLFN(·call33554432, 33554432)
CALLFN(·call67108864, 67108864)
CALLFN(·call134217728, 134217728)
CALLFN(·call268435456, 268435456)
CALLFN(·call536870912, 536870912)
CALLFN(·call1073741824, 1073741824)

// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
// Must obey the gcc calling convention.
TEXT _cgo_topofstack(SB),NOSPLIT,$8
	// g (X27) and REG_TMP (X31) might be clobbered by load_g.
	// X27 is callee-save in the gcc calling convention, so save it.
	MOV	g, savedX27-8(SP)

	CALL	runtime·load_g(SB)
	MOV	g_m(g), X5
	MOV	m_curg(X5), X5
	MOV	(g_stack+stack_hi)(X5), X10	// return value in X10

	MOV	savedX27-8(SP), g
	RET

// func goexit(neverCallThisFunction)
// The top-most function running on a goroutine
// returns to goexit+PCQuantum.
TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0
	MOV	ZERO, ZERO	// NOP
	JMP	runtime·goexit1(SB)	// does not return
	// traceback from goexit1 must hit code range of goexit
	MOV	ZERO, ZERO	// NOP

// func cgocallback(fn, frame unsafe.Pointer, ctxt uintptr)
// See cgocall.go for more details.
TEXT ·cgocallback(SB),NOSPLIT,$24-24
	NO_LOCAL_POINTERS

	// Load m and g from thread-local storage.
	MOVBU	runtime·iscgo(SB), X5
	BEQ	ZERO, X5, nocgo
	CALL	runtime·load_g(SB)
nocgo:

	// If g is nil, Go did not create the current thread.
	// Call needm to obtain one for temporary use.
	// In this case, we're running on the thread stack, so there's
	// lots of space, but the linker doesn't know. Hide the call from
	// the linker analysis by using an indirect call.
	BEQ	ZERO, g, needm

	MOV	g_m(g), X5
	MOV	X5, savedm-8(SP)
	JMP	havem

needm:
	MOV	g, savedm-8(SP)	// g is zero, so is m.
	MOV	$runtime·needm(SB), X6
	JALR	RA, X6

	// Set m->sched.sp = SP, so that if a panic happens
	// during the function we are about to execute, it will
	// have a valid SP to run on the g0 stack.
	// The next few lines (after the havem label)
	// will save this SP onto the stack and then write
	// the same SP back to m->sched.sp. That seems redundant,
	// but if an unrecovered panic happens, unwindm will
	// restore the g->sched.sp from the stack location
	// and then systemstack will try to use it. If we don't set it here,
	// that restored SP will be uninitialized (typically 0) and
	// will not be usable.
	MOV	g_m(g), X5
	MOV	m_g0(X5), X6
	MOV	X2, (g_sched+gobuf_sp)(X6)

havem:
	// Now there's a valid m, and we're running on its m->g0.
	// Save current m->g0->sched.sp on stack and then set it to SP.
	// Save current sp in m->g0->sched.sp in preparation for
	// switch back to m->curg stack.
	// NOTE: unwindm knows that the saved g->sched.sp is at 8(X2) aka savedsp-24(SP).
	MOV	m_g0(X5), X6
	MOV	(g_sched+gobuf_sp)(X6), X7
	MOV	X7, savedsp-24(SP)	// must match frame size
	MOV	X2, (g_sched+gobuf_sp)(X6)

	// Switch to m->curg stack and call runtime.cgocallbackg.
	// Because we are taking over the execution of m->curg
	// but *not* resuming what had been running, we need to
	// save that information (m->curg->sched) so we can restore it.
	// We can restore m->curg->sched.sp easily, because calling
	// runtime.cgocallbackg leaves SP unchanged upon return.
	// To save m->curg->sched.pc, we push it onto the curg stack and
	// open a frame the same size as cgocallback's g0 frame.
	// Once we switch to the curg stack, the pushed PC will appear
	// to be the return PC of cgocallback, so that the traceback
	// will seamlessly trace back into the earlier calls.
	MOV	m_curg(X5), g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), X6	// prepare stack as X6
	MOV	(g_sched+gobuf_pc)(g), X7
	MOV	X7, -(24+8)(X6)	// "saved LR"; must match frame size
	// Gather our arguments into registers.
	MOV	fn+0(FP), X7
	MOV	frame+8(FP), X8
	MOV	ctxt+16(FP), X9
	MOV	$-(24+8)(X6), X2	// switch stack; must match frame size
	MOV	X7, 8(X2)
	MOV	X8, 16(X2)
	MOV	X9, 24(X2)
	CALL	runtime·cgocallbackg(SB)

	// Restore g->sched (== m->curg->sched) from saved values.
	MOV	0(X2), X7
	MOV	X7, (g_sched+gobuf_pc)(g)
	MOV	$(24+8)(X2), X6	// must match frame size
	MOV	X6, (g_sched+gobuf_sp)(g)

	// Switch back to m->g0's stack and restore m->g0->sched.sp.
	// (Unlike m->curg, the g0 goroutine never uses sched.pc,
	// so we do not have to restore it.)
	MOV	g_m(g), X5
	MOV	m_g0(X5), g
	CALL	runtime·save_g(SB)
	MOV	(g_sched+gobuf_sp)(g), X2
	MOV	savedsp-24(SP), X6	// must match frame size
	MOV	X6, (g_sched+gobuf_sp)(g)

	// If the m on entry was nil, we called needm above to borrow an m
	// for the duration of the call. Since the call is over, return it with dropm.
	MOV	savedm-8(SP), X5
	BNE	ZERO, X5, droppedm
	MOV	$runtime·dropm(SB), X6
	JALR	RA, X6
droppedm:

	// Done!
	RET

TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
	EBREAK
	RET

TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
	EBREAK
	RET

// void setg(G*); set g. for use by needm.
TEXT runtime·setg(SB), NOSPLIT, $0-8
	MOV	gg+0(FP), g
	// This only happens if iscgo, so jump straight to save_g
	CALL	runtime·save_g(SB)
	RET

TEXT ·checkASM(SB),NOSPLIT,$0-1
	MOV	$1, T0
	MOV	T0, ret+0(FP)
	RET

// gcWriteBarrier performs a heap pointer write and informs the GC.
//
// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
// - T0 is the destination of the write
// - T1 is the value being written at T0.
// It clobbers X31 aka T6 (the linker temp register, REG_TMP).
// The act of CALLing gcWriteBarrier will clobber RA (LR).
// It does not clobber any other general-purpose registers,
// but may clobber others (e.g., floating point registers).
TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$216
	// Save the registers clobbered by the fast path.
	MOV	A0, 25*8(X2)
	MOV	A1, 26*8(X2)
	MOV	g_m(g), A0
	MOV	m_p(A0), A0
	MOV	(p_wbBuf+wbBuf_next)(A0), A1
	// Increment wbBuf.next position.
	ADD	$16, A1
	MOV	A1, (p_wbBuf+wbBuf_next)(A0)
	MOV	(p_wbBuf+wbBuf_end)(A0), A0
	MOV	A0, T6	// T6 is linker temp register (REG_TMP)
	// Record the write.
	MOV	T1, -16(A1)	// Record value
	MOV	(T0), A0	// TODO: This turns bad writes into bad reads.
	MOV	A0, -8(A1)	// Record *slot
	// Is the buffer full?
	BEQ	A1, T6, flush
ret:
	MOV	25*8(X2), A0
	MOV	26*8(X2), A1
	// Do the write.
	MOV	T1, (T0)
	RET

flush:
	// Save all general purpose registers since these could be
	// clobbered by wbBufFlush and were not saved by the caller.
	MOV	T0, 1*8(X2)	// Also first argument to wbBufFlush
	MOV	T1, 2*8(X2)	// Also second argument to wbBufFlush
	// X0 is zero register
	// X1 is LR, saved by prologue
	// X2 is SP
	MOV	X3, 3*8(X2)
	// X4 is TP
	// X5 is first arg to wbBufFlush (T0)
	// X6 is second arg to wbBufFlush (T1)
	MOV	X7, 4*8(X2)
	MOV	X8, 5*8(X2)
	MOV	X9, 6*8(X2)
	// X10 already saved (A0)
	// X11 already saved (A1)
	MOV	X12, 7*8(X2)
	MOV	X13, 8*8(X2)
	MOV	X14, 9*8(X2)
	MOV	X15, 10*8(X2)
	MOV	X16, 11*8(X2)
	MOV	X17, 12*8(X2)
	MOV	X18, 13*8(X2)
	MOV	X19, 14*8(X2)
	MOV	X20, 15*8(X2)
	MOV	X21, 16*8(X2)
	MOV	X22, 17*8(X2)
	MOV	X23, 18*8(X2)
	MOV	X24, 19*8(X2)
	MOV	X25, 20*8(X2)
	MOV	X26, 21*8(X2)
	// X27 is g.
	MOV	X28, 22*8(X2)
	MOV	X29, 23*8(X2)
	MOV	X30, 24*8(X2)
	// X31 is tmp register.

	// This takes arguments T0 and T1.
	CALL	runtime·wbBufFlush(SB)

	MOV	1*8(X2), T0
	MOV	2*8(X2), T1
	MOV	3*8(X2), X3
	MOV	4*8(X2), X7
	MOV	5*8(X2), X8
	MOV	6*8(X2), X9
	MOV	7*8(X2), X12
	MOV	8*8(X2), X13
	MOV	9*8(X2), X14
	MOV	10*8(X2), X15
	MOV	11*8(X2), X16
	MOV	12*8(X2), X17
	MOV	13*8(X2), X18
	MOV	14*8(X2), X19
	MOV	15*8(X2), X20
	MOV	16*8(X2), X21
	MOV	17*8(X2), X22
	MOV	18*8(X2), X23
	MOV	19*8(X2), X24
	MOV	20*8(X2), X25
	MOV	21*8(X2), X26
	MOV	22*8(X2), X28
	MOV	23*8(X2), X29
	MOV	24*8(X2), X30

	JMP	ret

// Note: these functions use a special calling convention to save generated code space.
// Arguments are passed in registers, but the space for those arguments is allocated
// in the caller's stack frame. These stubs write the args into that stack space and
// then tail call to the corresponding runtime handler.
// The tail call makes these stubs disappear in backtraces.
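//
// In each stub below, x is the out-of-range index or offset and y is the
// length or capacity it was checked against. The register pairs used
// (T0/T1, T1/T2, T2/T3) are assumed to mirror the registers in which the
// compiler's riscv64 bounds-check lowering leaves those values for each
// kind of check.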
TEXT runtime·panicIndex(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicIndex(SB)
TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicIndexU(SB)
TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSliceAlen(SB)
TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSliceAlenU(SB)
TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSliceAcap(SB)
TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSliceAcapU(SB)
TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicSliceB(SB)
TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicSliceBU(SB)
TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16
	MOV	T2, x+0(FP)
	MOV	T3, y+8(FP)
	JMP	runtime·goPanicSlice3Alen(SB)
TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16
	MOV	T2, x+0(FP)
	MOV	T3, y+8(FP)
	JMP	runtime·goPanicSlice3AlenU(SB)
TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16
	MOV	T2, x+0(FP)
	MOV	T3, y+8(FP)
	JMP	runtime·goPanicSlice3Acap(SB)
TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16
	MOV	T2, x+0(FP)
	MOV	T3, y+8(FP)
	JMP	runtime·goPanicSlice3AcapU(SB)
TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSlice3B(SB)
TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16
	MOV	T1, x+0(FP)
	MOV	T2, y+8(FP)
	JMP	runtime·goPanicSlice3BU(SB)
TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicSlice3C(SB)
TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16
	MOV	T0, x+0(FP)
	MOV	T1, y+8(FP)
	JMP	runtime·goPanicSlice3CU(SB)

DATA	runtime·mainPC+0(SB)/8,$runtime·main(SB)
GLOBL	runtime·mainPC(SB),RODATA,$8
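
// runtime·mainPC above is laid out like a funcval referring to runtime.main;
// rt0_go (near the top of this file) passes its address to newproc as the
// entry function when creating the main goroutine.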