// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens
//
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"),
// which may be toggled on and off. Their precise semantics are defined in
// section 6.3 of the specification, but the basic idea is as follows:
//
// - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily.
//   It guarantees only that it will execute atomically.
//
// - If aq is set, the CPU may move the instruction backward, but not forward.
//
// - If rl is set, the CPU may move the instruction forward, but not backward.
//
// - If both are set, the CPU may not reorder the instruction at all.
//
// These four modes correspond to other well-known memory models on other CPUs.
// On ARM, aq corresponds to a dmb ishst, aq+rl corresponds to a dmb ish. On
// Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an mfence
// (or a lock prefix).
//
// Go's memory model requires that
// - if a read happens after a write, the read must observe the write, and
//   that
// - if a read happens concurrently with a write, the read may observe the
//   write.
// aq is sufficient to guarantee this, so that's what we use here. (This jibes
// with ARM, which uses dmb ishst.)
//go:build riscv64

#include "textflag.h"

// PublicationBarrier emits a full memory fence so that earlier writes are
// ordered before any subsequent publication store.
TEXT ·PublicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
	FENCE
	RET

//
// Store
//

// func Store8(ptr *uint8, val uint8)
// RISC-V has no 8-bit AMO, so bracket a plain byte store with fences.
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	FENCE
	MOVB	A1, (A0)
	FENCE
	RET

// func Store32(ptr *uint32, val uint32)
TEXT ·Store32(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOSWAPW A1, (A0), ZERO	// atomic store; old value discarded into ZERO
	RET

// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOV	ptr+0(FP), A0
	MOV	val+8(FP), A1
	AMOSWAPD A1, (A0), ZERO	// atomic store; old value discarded into ZERO
	RET

TEXT ·StoreUintptr(SB),NOSPLIT,$0-16
	JMP	·Store64(SB)

// func StorePointer(ptr unsafe.Pointer, val unsafe.Pointer)
TEXT ·StorePointer(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreInt32(SB),NOSPLIT,$0-12
	// Fixed: previously jumped to ·Store(SB), a symbol that is not
	// defined in this file; the 32-bit store here is named Store32.
	JMP	·Store32(SB)

TEXT ·StoreInt64(SB),NOSPLIT,$0-16
	JMP	·Store64(SB)

//
// StoreRel
//

TEXT ·StoreRel32(SB), NOSPLIT, $0-12
	// Fixed: previously jumped to undefined ·Store(SB); see StoreInt32.
	JMP	·Store32(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRelUintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

//
// Load
//

// func Load8(ptr *uint8) uint8
// RISC-V has no 8-bit LR, so bracket a plain byte load with fences.
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOV	ptr+0(FP), A0
	FENCE
	MOVBU	(A0), A1
	FENCE
	MOVB	A1, ret+8(FP)
	RET

// func Load32(ptr *uint32) uint32
TEXT ·Load32(SB),NOSPLIT|NOFRAME,$0-12
	MOV	ptr+0(FP), A0
	LRW	(A0), A0	// load-reserved gives an atomic 32-bit read
	MOVW	A0, ret+8(FP)
	RET

// func Load64(ptr *uint64) uint64
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOV	ptr+0(FP), A0
	LRD	(A0), A0	// load-reserved gives an atomic 64-bit read
	MOV	A0, ret+8(FP)
	RET

TEXT ·LoadUintptr(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

// func LoadPointer(ptr unsafe.Pointer) unsafe.Pointer
TEXT ·LoadPointer(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

TEXT ·LoadUint(SB),NOSPLIT,$0-16
	JMP	·LoadUintptr(SB)

TEXT ·LoadInt32(SB),NOSPLIT,$0-12
	JMP	·Load32(SB)
TEXT ·LoadInt64(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

//
// LoadAcq
//
TEXT ·LoadAcq32(SB),NOSPLIT|NOFRAME,$0-12
	JMP	·Load32(SB)

TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)

TEXT ·LoadAcqUintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)

//
// bitwise
//

// func And8(ptr *uint8, val uint8)
// There is no byte-sized AMO, so emulate with AMOAND on the aligned
// 32-bit word containing the byte: the other three byte lanes are
// ANDed with all-ones and therefore left unchanged.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit shift of the target byte
	XOR	$255, A1	// invert val within the low byte...
	SLL	A2, A1		// ...shift it into position...
	XOR	$-1, A1		// ...invert again: all-ones outside the target byte
	AMOANDW	A1, (A0), ZERO
	RET

// func And32(ptr *uint32, val uint32)
TEXT ·And32(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOANDW	A1, (A0), ZERO
	RET

// func Or8(ptr *uint8, val uint8)
// AMOOR on the containing aligned word; the other byte lanes are
// ORed with zero and therefore left unchanged.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit shift of the target byte
	SLL	A2, A1		// val in position, zeroes elsewhere
	AMOORW	A1, (A0), ZERO
	RET

// func Or32(ptr *uint32, val uint32)
TEXT ·Or32(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOORW	A1, (A0), ZERO
	RET

//
// Swap
//

// func Swap32(ptr *uint32, new uint32) uint32
TEXT ·Swap32(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	new+8(FP), A1
	AMOSWAPW A1, (A0), A1	// A1 = old value
	MOVW	A1, ret+16(FP)
	RET

// func Swap64(ptr *uint64, new uint64) uint64
TEXT ·Swap64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	new+8(FP), A1
	AMOSWAPD A1, (A0), A1	// A1 = old value
	MOV	A1, ret+16(FP)
	RET

// func SwapUintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·SwapUintptr(SB), NOSPLIT, $0-24
	JMP	·Swap64(SB)

// func SwapInt32(ptr *int32, new int32) int32
TEXT ·SwapInt32(SB), NOSPLIT, $0-20
	JMP	·Swap32(SB)

// func SwapInt64(ptr *int64, new int64) int64
TEXT ·SwapInt64(SB), NOSPLIT, $0-24
	JMP	·Swap64(SB)

//
// Add
//

// func Add32(ptr *uint32, delta int32) uint32
TEXT ·Add32(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	delta+8(FP), A1
	AMOADDW	A1, (A0), A2	// A2 = old value
	ADD	A2, A1, A0	// result = old + delta
	MOVW	A0, ret+16(FP)
	RET

// func Add64(ptr *uint64, delta int64) uint64
TEXT ·Add64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	delta+8(FP), A1
	AMOADDD	A1, (A0), A2	// A2 = old value
	ADD	A2, A1, A0	// result = old + delta
	MOV	A0, ret+16(FP)
	RET

// func AddUintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·AddUintptr(SB), NOSPLIT, $0-24
	JMP	·Add64(SB)

TEXT ·AddInt32(SB),NOSPLIT,$0-20
	JMP	·Add32(SB)

TEXT ·AddInt64(SB),NOSPLIT,$0-24
	// Fixed: the previous open-coded body stored the 64-bit result with
	// MOVW, truncating it to 32 bits. Tail-jump to Add64 like the other
	// signed aliases.
	JMP	·Add64(SB)

//
// Compare and swap
//

// func Cas32(ptr *uint32, old, new uint32) bool
TEXT ·Cas32(SB), NOSPLIT, $0-17
	MOV	ptr+0(FP), A0
	MOVW	old+8(FP), A1
	MOVW	new+12(FP), A2
cas_again:
	LRW	(A0), A3		// load-reserved current value
	BNE	A3, A1, cas_fail
	SCW	A2, (A0), A4		// store-conditional; A4 != 0 on failure
	BNE	A4, ZERO, cas_again	// reservation lost: retry
	MOV	$1, A0
	MOVB	A0, ret+16(FP)
	RET
cas_fail:
	// Fixed: use a byte store for the bool result. The previous 8-byte
	// MOV wrote past the 1-byte return slot at the end of the $0-17 frame.
	MOVB	ZERO, ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOV	ptr+0(FP), A0
	MOV	old+8(FP), A1
	MOV	new+16(FP), A2
cas_again:
	LRD	(A0), A3		// load-reserved current value
	BNE	A3, A1, cas_fail
	SCD	A2, (A0), A4		// store-conditional; A4 != 0 on failure
	BNE	A4, ZERO, cas_again	// reservation lost: retry
	MOV	$1, A0
	MOVB	A0, ret+24(FP)
	RET
cas_fail:
	MOVB	ZERO, ret+24(FP)
	RET

TEXT ·CasUintptr(SB),NOSPLIT,$0-25
	JMP	·Cas64(SB)

TEXT ·CasUnsafePointer(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·CasInt32(SB),NOSPLIT,$0-17
	JMP	·Cas32(SB)

TEXT ·CasInt64(SB),NOSPLIT,$0-25
	JMP	·Cas64(SB)

//
// CasRel
//

TEXT ·CasRel32(SB), NOSPLIT, $0-17
	JMP	·Cas32(SB)