// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// The assembler does not know the AMO mnemonics, so the atomic
// instructions are hand-encoded as raw WORDs.
//
// AMOWSC emits a 32-bit (funct3=010, .W) AMO; AMODSC emits a 64-bit
// (funct3=011, .D) AMO. The base encodings 0x0600202f / 0x0600302f have
// both the aq and rl ordering bits set, so every AMO here is
// sequentially consistent. rd/rs1/rs2 are raw register numbers
// (A0=10, A1=11, A2=12, ...); op is the funct5 selector below.
#define AMOWSC(op,rd,rs1,rs2) WORD $0x0600202f+rd<<7+rs1<<15+rs2<<20+op<<27
#define AMODSC(op,rd,rs1,rs2) WORD $0x0600302f+rd<<7+rs1<<15+rs2<<20+op<<27

// funct5 opcodes for the AMO encodings above.
#define ADD_ 0   // AMOADD
#define SWAP_ 1  // AMOSWAP
#define LR_ 2    // LR (load-reserved)
#define SC_ 3    // SC (store-conditional)
#define OR_ 8    // AMOOR
#define AND_ 12  // AMOAND

// FENCE with every predecessor/successor bit set: a full memory barrier.
#define FENCE WORD $0x0ff0000f

// func Load(ptr *uint32) uint32
// Atomic 32-bit load via LR.W.AQRL: A0 <- *ptr.
TEXT ·Load(SB),NOSPLIT,$-8-12
	MOV	ptr+0(FP), A0
	AMOWSC(LR_,10,10,0)
	MOVW	A0, ret+8(FP)
	RET

// func Load64(ptr *uint64) uint64
// Atomic 64-bit load via LR.D.AQRL: A0 <- *ptr.
TEXT ·Load64(SB),NOSPLIT,$-8-16
	MOV	ptr+0(FP), A0
	AMODSC(LR_,10,10,0)
	MOV	A0, ret+8(FP)
	RET

// func Loadp(ptr unsafe.Pointer) unsafe.Pointer
// Pointers are 64-bit; identical frame layout, so tail-call Load64.
TEXT ·Loadp(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
// Pointer store without a write barrier; same layout as Store64.
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

// func Store(ptr *uint32, val uint32)
// AMOSWAP.W with rd=x0 discards the old value: *ptr = val.
TEXT ·Store(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOWSC(SWAP_,0,10,11)
	RET

// func Store64(ptr *uint64, val uint64)
// AMOSWAP.D with rd=x0 discards the old value: *ptr = val.
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOV	ptr+0(FP), A0
	MOV	val+8(FP), A1
	AMODSC(SWAP_,0,10,11)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
// AMOSWAP.W: atomically *ptr = new, returning the old value (in A1).
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	new+8(FP), A1
	AMOWSC(SWAP_,11,10,11)
	MOVW	A1, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
// AMOSWAP.D: atomically *ptr = new, returning the old value (in A1).
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	new+8(FP), A1
	AMODSC(SWAP_,11,10,11)
	MOV	A1, ret+16(FP)
	RET
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}

// func Cas64(ptr *uint64, old, new uint64) bool
// Classic LR/SC compare-and-swap loop. Register numbers in the raw
// encodings: A0=10 (ptr), A2=12 (new), A3=13 (loaded value), A4=14
// (SC status, 0 on success).
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOV	ptr+0(FP), A0
	MOV	old+8(FP), A1
	MOV	new+16(FP), A2
again:
	// A3 = LR.D *ptr (reserves the address).
	AMODSC(LR_,13,10,0)
	BNE	A3, A1, fail
	// A4 = SC.D *ptr, A2; nonzero means the reservation was lost.
	AMODSC(SC_,14,10,12)
	BNE	A4, ZERO, again
	MOV	$1, A0
	MOVB	A0, ret+24(FP)
	RET
fail:
	MOVB	ZERO, ret+24(FP)
	RET

// Atomically:
//	*ptr += delta;
//	return *ptr;

// func Xadd(ptr *uint32, delta int32) uint32
// AMOADD.W returns the OLD value (into A2); add delta again to
// produce the new value the caller expects.
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	delta+8(FP), A1
	AMOWSC(ADD_,12,10,11)
	ADD	A2,A1,A0
	MOVW	A0, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
// 64-bit variant of Xadd: AMOADD.D yields the old value; re-add delta.
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	delta+8(FP), A1
	AMODSC(ADD_,12,10,11)
	ADD	A2,A1,A0
	MOV	A0, ret+16(FP)
	RET

// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
// uintptr is 64-bit here; identical frame layout, so tail-call Xadd64.
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
// uintptr is 64-bit here; identical frame layout, so tail-call Xchg64.
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

// func And8(ptr *uint8, val uint8)
// No byte-wide AMO exists, so operate on the aligned word containing
// the byte. Build a 32-bit mask that is ~val in the target byte lane
// and all-ones elsewhere, then AMOAND.W it in. Byte-lane arithmetic
// assumes little-endian byte order (RISC-V is little-endian).
TEXT ·And8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit shift for the byte lane
	XOR	$255, A1	// invert val within the low byte
	SLL	A2, A1		// move it into its lane
	XOR	$-1, A1		// invert all: lane = val, elsewhere = 1s
	AMOWSC(AND_,0,10,11)	// *word &= mask (old value discarded)
	RET

// func Or8(ptr *uint8, val uint8)
// Word-sized counterpart of And8: shift val into its byte lane
// (zeros elsewhere) and AMOOR.W it into the aligned word.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit shift for the byte lane
	SLL	A2, A1		// val in its lane, zeros elsewhere
	AMOWSC(OR_,0,10,11)	// *word |= mask (old value discarded)
	RET