// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build race

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"
#include "asm_ppc64x.h"

// The following functions allow calling the clang-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and scheduler.
// Third, in the long term it will allow removing the cyclic runtime/race dependency on cmd/go.

// A brief recap of the ppc64le calling convention.
// Arguments are passed in R3, R4, R5 ...
// SP must be 16-byte aligned.

// Note that for ppc64x, LLVM follows the standard ABI and
// expects arguments in registers, so these functions move
// the arguments from storage to the registers expected
// by the ABI.

// When calling from Go to Clang tsan code:
// R3 is the 1st argument and is usually the ThreadState*
// R4-? are the 2nd, 3rd, 4th, etc. arguments

// When calling racecalladdr:
// R8 is the call target address

// The race ctx is passed in R3 and loaded in
// racecalladdr.
//
// The sequence used to get the race ctx:
//    MOVD    runtime·tls_g(SB), R10 // offset to TLS
//    MOVD    0(R13)(R10*1), g       // R13=TLS for this thread, g = R30
//    MOVD    g_racectx(g), R3       // racectx == ThreadState

// func runtime·RaceRead(addr uintptr)
// Called from instrumented Go code.
// Reports a read of addr to tsan; the pc reported is this function's
// return address, i.e. the instrumented instruction that follows the BL here.
TEXT	runtime·raceread(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	// NOSPLIT $0 frame: LR still holds the caller's return address,
	// which serves as the pc argument for the race report.
	MOVD	LR, R5
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_read(SB), R8
	// Tail-branch: racecalladdr loads R3 (racectx), range-checks R4 and
	// returns directly to our caller.
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	BR	runtime·raceread(SB)

// void runtime·racereadpc(void *addr, void *callpc, void *pc)
// Like raceread, but the caller supplies both pcs explicitly.
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_read_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceWrite(addr uintptr)
// Called from instrumented Go code.
TEXT	runtime·racewrite(SB), NOSPLIT, $0-8
	MOVD	addr+0(FP), R4
	MOVD	LR, R5 // caller has set LR via BL inst
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVD	$__tsan_write(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	JMP	runtime·racewrite(SB)

// void runtime·racewritepc(void *addr, void *callpc, void *pc)
// Like racewrite, but the caller supplies both pcs explicitly.
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	callpc+8(FP), R5
	MOVD	pc+16(FP), R6
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVD	$__tsan_write_pc(SB), R8
	BR	racecalladdr<>(SB)

// func runtime·RaceReadRange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racereadrange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6 // pc: caller's return address (LR untouched in this leaf)
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	// tsan wants a return address, so advance past the call instruction
	// (one 4-byte ppc64 instruction).
	ADD	$4, R6
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_read_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	BR	runtime·racereadrange(SB)

// func runtime·RaceWriteRange(addr, size uintptr)
// Called from instrumented Go code.
TEXT	runtime·racewriterange(SB), NOSPLIT, $0-16
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	LR, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	BR	runtime·racewriterange(SB)

// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Called from instrumented Go code
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVD	addr+0(FP), R4
	MOVD	size+8(FP), R5
	MOVD	pc+16(FP), R6
	// tsan wants a return address: skip the 4-byte call instruction.
	ADD	$4, R6
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVD	$__tsan_write_range(SB), R8
	BR	racecalladdr<>(SB)

// Call a __tsan function from Go code.
// R8 = tsan function address
// R3 = *ThreadState a.k.a. g_racectx from g
// R4 = addr passed to __tsan function
//
// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3 // goroutine context
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	// Addresses outside both ranges are not shadowed and must not be reported.
	MOVD	runtime·racearenastart(SB), R9
	CMP	R4, R9
	BLT	data // addr < arenastart: try the data range
	MOVD	runtime·racearenaend(SB), R9
	CMP	R4, R9
	BLT	call // arenastart <= addr < arenaend: instrumented heap
data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R4, R9
	BLT	ret // addr below data segment: ignore
	MOVD	runtime·racedataend(SB), R9
	CMP	R4, R9
	// NOTE(review): BGT treats addr == racedataend as in-range, while
	// racecallatomic below uses BGE for the same bound — confirm which
	// half-open/closed convention is intended.
	BGT	ret
call:
	// Careful!! racecall will save LR on its
	// stack, which is OK as long as racecalladdr
	// doesn't change in a way that generates a stack.
	// racecall should return to the caller of
	// racecalladdr.
	BR	racecall<>(SB)
ret:
	RET

// func runtime·racefuncenterfp()
// Called from instrumented Go code.
// Like racefuncenter but doesn't pass an arg, uses the caller pc
// from the first slot on the stack.
TEXT	runtime·racefuncenterfp(SB), NOSPLIT, $0-0
	MOVD	0(R1), R8 // caller pc from the LR save slot
	BR	racefuncenter<>(SB)

// func runtime·racefuncenter(pc uintptr)
// Called from instrumented Go code.
// Not used now since gc/racewalk.go doesn't pass the
// correct caller pc and racefuncenterfp can do it.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVD	callpc+0(FP), R8
	BR	racefuncenter<>(SB)

// Common code for racefuncenter/racefuncenterfp
// R8 = caller's return address (set by both entry points above)
TEXT	racefuncenter<>(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD	R8, R4 // caller pc set by caller in R8
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVD	$__tsan_func_enter(SB), R8
	BR	racecall<>(SB)
	RET // unreachable: the BR above does not return here

// func runtime·racefuncexit()
// Called from Go instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	// void __tsan_func_exit(ThreadState *thr);
	MOVD	$__tsan_func_exit(SB), R8
	BR	racecall<>(SB)

// Atomic operations for sync/atomic package.
// Each wrapper loads the matching __tsan_go_atomic* entry point into R8,
// points R6 at this function's incoming argument area, and branches to
// racecallatomic, which fills in R3/R4/R5 and performs the call.
// R6 = addr of arguments passed to this function
// R3, R4, R5 set in racecallatomic

// Load atomic in tsan
TEXT	sync∕atomic·LoadInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_load(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg (FIXED_FRAME above SP)
	BR	racecallatomic<>(SB)
	RET // unreachable after BR

TEXT	sync∕atomic·LoadInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_load(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)
	RET // unreachable after BR

TEXT	sync∕atomic·LoadUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·LoadInt32(SB)

TEXT	sync∕atomic·LoadUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

TEXT	sync∕atomic·LoadPointer(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·LoadInt64(SB)

// Store atomic in tsan
TEXT	sync∕atomic·StoreInt32(SB), NOSPLIT, $0-12
	GO_ARGS
	// void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_store(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreInt64(SB), NOSPLIT, $0-16
	GO_ARGS
	// void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_store(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·StoreUint32(SB), NOSPLIT, $0-12
	GO_ARGS
	BR	sync∕atomic·StoreInt32(SB)

TEXT	sync∕atomic·StoreUint64(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)

TEXT	sync∕atomic·StoreUintptr(SB), NOSPLIT, $0-16
	GO_ARGS
	BR	sync∕atomic·StoreInt64(SB)

// Swap in tsan
TEXT	sync∕atomic·SwapInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·SwapUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·SwapInt32(SB)

TEXT	sync∕atomic·SwapUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)

TEXT	sync∕atomic·SwapUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·SwapInt64(SB)

// Add atomic in tsan
TEXT	sync∕atomic·AddInt32(SB), NOSPLIT, $0-20
	GO_ARGS
	// void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic32_fetch_add(SB), R8
	// $64, not $32: Add needs the result, so racecallatomic is invoked with
	// BL below and control returns here; being a non-leaf, this function
	// gets a stack frame from the assembler, pushing the caller's args
	// another 32 bytes up from SP.
	ADD	$64, R1, R6 // addr of caller's 1st arg
	BL	racecallatomic<>(SB)
	// tsan's fetch_add returns the OLD value, but Go's Add returns the NEW
	// value, so add the delta to the result before returning it.
	MOVW	add+8(FP), R3
	MOVW	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVW	R3, ret+16(FP)
	RET

TEXT	sync∕atomic·AddInt64(SB), NOSPLIT, $0-24
	GO_ARGS
	// void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
	MOVD	$__tsan_go_atomic64_fetch_add(SB), R8
	ADD	$64, R1, R6 // addr of caller's 1st arg (see AddInt32 for the $64)
	BL	racecallatomic<>(SB)
	// Convert tsan's fetch_add (old value) to Go's Add (new value).
	MOVD	add+8(FP), R3
	MOVD	ret+16(FP), R4
	ADD	R3, R4, R3
	MOVD	R3, ret+16(FP)
	RET

TEXT	sync∕atomic·AddUint32(SB), NOSPLIT, $0-20
	GO_ARGS
	BR	sync∕atomic·AddInt32(SB)

TEXT	sync∕atomic·AddUint64(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)

TEXT	sync∕atomic·AddUintptr(SB), NOSPLIT, $0-24
	GO_ARGS
	BR	sync∕atomic·AddInt64(SB)

// CompareAndSwap in tsan
TEXT	sync∕atomic·CompareAndSwapInt32(SB), NOSPLIT, $0-17
	GO_ARGS
	// void __tsan_go_atomic32_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic32_compare_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapInt64(SB), NOSPLIT, $0-25
	GO_ARGS
	// void __tsan_go_atomic64_compare_exchange(
	//   ThreadState *thr, uptr cpc, uptr pc, u8 *a)
	MOVD	$__tsan_go_atomic64_compare_exchange(SB), R8
	ADD	$32, R1, R6 // addr of caller's 1st arg
	BR	racecallatomic<>(SB)

TEXT	sync∕atomic·CompareAndSwapUint32(SB), NOSPLIT, $0-17
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt32(SB)

TEXT	sync∕atomic·CompareAndSwapUint64(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)

TEXT	sync∕atomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-25
	GO_ARGS
	BR	sync∕atomic·CompareAndSwapInt64(SB)

// Common function used to call tsan's atomic functions
// R3 = *ThreadState
// R4 = TODO: What's this supposed to be? (the in-range path below loads the caller's pc here)
// R5 = caller pc
// R6 = addr of incoming arg list
// R8 contains addr of target function.
TEXT	racecallatomic<>(SB), NOSPLIT, $0-0
	// Trigger SIGSEGV early if address passed to atomic function is bad.
	MOVD	(R6), R7 // 1st arg is addr
	MOVD	(R7), R9 // segv here if addr is bad
	// Check that addr is within [arenastart, arenaend) or within [racedatastart, racedataend).
	MOVD	runtime·racearenastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_data // addr < arenastart: try data range
	MOVD	runtime·racearenaend(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ok // arenastart <= addr < arenaend
racecallatomic_data:
	MOVD	runtime·racedatastart(SB), R9
	CMP	R7, R9
	BLT	racecallatomic_ignore
	MOVD	runtime·racedataend(SB), R9
	CMP	R7, R9
	BGE	racecallatomic_ignore
racecallatomic_ok:
	// Addr is within the good range, call the atomic function.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3 // goroutine racectx aka *ThreadState
	MOVD	R8, R5 // pc is the function called
	MOVD	(R1), R4 // caller pc from stack
	BL	racecall<>(SB) // BL needed to maintain stack consistency
	RET
racecallatomic_ignore:
	// Addr is outside the good range.
	// Call __tsan_go_ignore_sync_begin to ignore synchronization during the atomic op.
	// An attempt to synchronize on the address would cause crash.
	MOVD	R8, R15 // save the original function
	MOVD	R6, R17 // save the original arg list addr
	MOVD	$__tsan_go_ignore_sync_begin(SB), R8 // func addr to call
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_racectx(g), R3 // goroutine context
	BL	racecall<>(SB)
	MOVD	R15, R8 // restore the original function
	MOVD	R17, R6 // restore arg list addr
	// Call the atomic function.
	// racecall will call LLVM race code which might clobber R30 (g),
	// so reload g from TLS first.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_racectx(g), R3
	// NOTE(review): R4/R5 here are swapped relative to the
	// racecallatomic_ok path above (there: R5=R8, R4=(R1)) —
	// confirm which order __tsan expects.
	MOVD	R8, R4 // pc being called same TODO as above
	MOVD	(R1), R5 // caller pc from latest LR
	BL	racecall<>(SB)
	// Call __tsan_go_ignore_sync_end.
	MOVD	$__tsan_go_ignore_sync_end(SB), R8
	MOVD	g_racectx(g), R3 // goroutine context; g was reloaded above
	BL	racecall<>(SB)
	RET

// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVD	fn+0(FP), R8
	MOVD	arg0+8(FP), R3
	MOVD	arg1+16(FP), R4
	MOVD	arg2+24(FP), R5
	MOVD	arg3+32(FP), R6
	JMP	racecall<>(SB)

// Finds g0 and sets its stack
// Arguments were loaded for call from Go to C
TEXT	racecall<>(SB), NOSPLIT, $0-0
	// Set the LR slot for the ppc64 ABI
	MOVD	LR, R10
	MOVD	R10, 0(R1) // Go expectation
	MOVD	R10, 16(R1) // C ABI
	// Get info from the current goroutine
	MOVD	runtime·tls_g(SB), R10 // g offset in TLS
	MOVD	0(R13)(R10*1), g // R13 = current TLS
	MOVD	g_m(g), R7 // m for g
	MOVD	R1, R16 // callee-saved, preserved across C call
	MOVD	m_g0(R7), R10 // g0 for m
	CMP	R10, g // same g0?
	BEQ	call // already on g0
	MOVD	(g_sched+gobuf_sp)(R10), R1 // switch R1 to g0's stack
call:
	// R8 = target function address; ELFv2 requires the callee's own
	// address in R12 for TOC setup.
	MOVD	R8, CTR
	MOVD	R8, R12 // expected by PPC64 ABI
	BL	(CTR)
	XOR	R0, R0 // clear R0 on return from Clang (Go asm assumes R0 == 0)
	MOVD	R16, R1 // restore R1; R16 nonvolatile in Clang
	MOVD	runtime·tls_g(SB), R10 // find correct g
	MOVD	0(R13)(R10*1), g
	MOVD	16(R1), R10 // LR was saved away, restore for return
	MOVD	R10, LR
	RET

// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g.
// The overall effect of Go->C->Go call chain is similar to that of mcall.
// RARG0 contains command code. RARG1 contains command-specific context.
// See racecallback for command codes.
TEXT	runtime·racecallbackthunk(SB), NOSPLIT, $-8
	// Handle command raceGetProcCmd (0) here.
	// First, code below assumes that we are on curg, while raceGetProcCmd
	// can be executed on g0. Second, it is called frequently, so will
	// benefit from this fast path.
	XOR	R0, R0 // clear R0 since we came from C code
	CMP	R3, $0
	BNE	rest
	// Fast path: raceGetProcCmd.
	// g (R30) is nonvolatile in the C ABI, so save it, borrow it to find
	// the proc ctx, then restore it before returning to C.
	MOVD	g, R9
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g
	MOVD	g_m(g), R3
	MOVD	m_p(R3), R3
	MOVD	p_raceprocctx(R3), R3
	MOVD	R3, (R4) // *arg = p.raceprocctx
	MOVD	R9, g // restore R30 for the C caller
	RET

	// This is all similar to what cgo does.
	// Save all registers that are nonvolatile in the ppc64 ELFv2 ABI,
	// since the Go code we call will clobber them.
rest:
	MOVD	LR, R10 // save link register
	MOVD	R10, 16(R1)
	MOVW	CR, R10
	MOVW	R10, 8(R1)
	MOVDU	R1, -336(R1) // Allocate frame needed for outargs and register save area

	// GPR save area: R15-R29 at 48..160, g (R30) at 168, R31 at 176;
	// R14 is stored past the FPRs at 328.
	MOVD	R14, 328(R1)
	MOVD	R15, 48(R1)
	MOVD	R16, 56(R1)
	MOVD	R17, 64(R1)
	MOVD	R18, 72(R1)
	MOVD	R19, 80(R1)
	MOVD	R20, 88(R1)
	MOVD	R21, 96(R1)
	MOVD	R22, 104(R1)
	MOVD	R23, 112(R1)
	MOVD	R24, 120(R1)
	MOVD	R25, 128(R1)
	MOVD	R26, 136(R1)
	MOVD	R27, 144(R1)
	MOVD	R28, 152(R1)
	MOVD	R29, 160(R1)
	MOVD	g, 168(R1) // R30
	MOVD	R31, 176(R1)
	// FPR save area: F14-F31 at 184..320.
	FMOVD	F14, 184(R1)
	FMOVD	F15, 192(R1)
	FMOVD	F16, 200(R1)
	FMOVD	F17, 208(R1)
	FMOVD	F18, 216(R1)
	FMOVD	F19, 224(R1)
	FMOVD	F20, 232(R1)
	FMOVD	F21, 240(R1)
	FMOVD	F22, 248(R1)
	FMOVD	F23, 256(R1)
	FMOVD	F24, 264(R1)
	FMOVD	F25, 272(R1)
	FMOVD	F26, 280(R1)
	FMOVD	F27, 288(R1)
	FMOVD	F28, 296(R1)
	FMOVD	F29, 304(R1)
	FMOVD	F30, 312(R1)
	FMOVD	F31, 320(R1)

	// Stash the C arguments (cmd, ctx) as Go arguments for racecallback.
	MOVD	R3, FIXED_FRAME+0(R1)
	MOVD	R4, FIXED_FRAME+8(R1)

	// Finish the g -> g0 switch that the C side began.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_m(g), R7
	MOVD	m_g0(R7), R8
	CMP	g, R8
	BEQ	noswitch // already on g0

	MOVD	R8, g // set g = m->g0

	BL	runtime·racecallback(SB)

	// All registers are clobbered after Go code, reload.
	MOVD	runtime·tls_g(SB), R10
	MOVD	0(R13)(R10*1), g

	MOVD	g_m(g), R7
	MOVD	m_curg(R7), g // restore g = m->curg

ret:
	// Restore everything saved above, mirror order.
	MOVD	328(R1), R14
	MOVD	48(R1), R15
	MOVD	56(R1), R16
	MOVD	64(R1), R17
	MOVD	72(R1), R18
	MOVD	80(R1), R19
	MOVD	88(R1), R20
	MOVD	96(R1), R21
	MOVD	104(R1), R22
	MOVD	112(R1), R23
	MOVD	120(R1), R24
	MOVD	128(R1), R25
	MOVD	136(R1), R26
	MOVD	144(R1), R27
	MOVD	152(R1), R28
	MOVD	160(R1), R29
	MOVD	168(R1), g // R30
	MOVD	176(R1), R31
	FMOVD	184(R1), F14
	FMOVD	192(R1), F15
	FMOVD	200(R1), F16
	FMOVD	208(R1), F17
	FMOVD	216(R1), F18
	FMOVD	224(R1), F19
	FMOVD	232(R1), F20
	FMOVD	240(R1), F21
	FMOVD	248(R1), F22
	FMOVD	256(R1), F23
	FMOVD	264(R1), F24
	FMOVD	272(R1), F25
	FMOVD	280(R1), F26
	FMOVD	288(R1), F27
	FMOVD	296(R1), F28
	FMOVD	304(R1), F29
	FMOVD	312(R1), F30
	FMOVD	320(R1), F31

	ADD	$336, R1
	MOVD	8(R1), R10
	MOVFL	R10, $0xff // Restore of CR
	MOVD	16(R1), R10 // restore LR saved in the prologue
	MOVD	R10, LR
	RET

noswitch:
	BL	runtime·racecallback(SB)
	JMP	ret

// tls_g, g value for each thread in TLS
GLOBL	runtime·tls_g+0(SB), TLSBSS+DUPOK, $8