github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/internal/atomic/atomic_mipsx.s

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build mips || mipsle

#include "textflag.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3
	LL	(R1), R4	// R4 = *R1
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, try_cas
	SYNC
	MOVB	R3, ret+12(FP)
	RET
cas_fail:
	SYNC
	MOVB	R0, ret+12(FP)
	RET

TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

TEXT ·Store8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVB	val+4(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	SYNC
	MOVB	0(R1), R1
	SYNC
	MOVB	R1, ret+4(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4
	MOVW	R4, R1
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd
	SYNC
	MOVW	R1, ret+8(FP)
	RET

// uint32 Xchg(ptr *uint32, new uint32)
// Atomically:
//	old := *ptr;
//	*ptr = new;
//	return old;
TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3
	LL	(R2), R1	// R1 = *R2
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg
	SYNC
	MOVW	R1, ret+8(FP)
	RET

TEXT ·Casint32(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Casint64(SB),NOSPLIT,$0-21
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$-0-8
	JMP	·Load(SB)

TEXT ·Storeint32(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Storeint64(SB),NOSPLIT,$0-12
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint32(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchgint32(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB),NOSPLIT,$0-20
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreRel(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

// void Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
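	// The next few instructions emulate a byte-wide atomic OR using the
	// word-wide LL/SC that the hardware provides: R3 becomes ptr rounded
	// down to its containing 32-bit word, the low two bits of ptr select
	// the byte lane within that word (flipped with XOR $3 on big-endian
	// mips so the lane index matches the byte's position in memory), and
	// val is shifted into that lane before the LL/SC retry loop below.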
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	SLL	R4, R2, R2	// Shift val for aligned ptr.  R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8	// Retry if the SC failed.
	SYNC
	RET

// void And8(byte volatile*, byte);
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	MOVW	$0xFF, R5
	SLL	R4, R2
	SLL	R4, R5
	NOR	R0, R5
	OR	R5, R2	// Shift val for aligned ptr.  R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8	// Retry if the SC failed.
	SYNC
	RET

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)	// Retry if the SC failed.
	SYNC
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)	// Retry if the SC failed.
	SYNC
	RET

// void spinLock(uint32 volatile *state);
// Acquire the lock by atomically storing 1 into *state once it reads 0.
TEXT ·spinLock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	MOVW	$1, R2
	SYNC
try_lock:
	MOVW	R2, R3
check_again:
	LL	(R1), R4
	BNE	R4, check_again	// Spin while the lock is held (*state != 0).
	SC	R3, (R1)	// Try to store 1; fails if *state changed since the LL.
	BEQ	R3, try_lock	// SC failed, start over.
	SYNC
	RET

// void spinUnlock(uint32 volatile *state);
// Release the lock with a plain fenced store of 0.
TEXT ·spinUnlock(SB),NOSPLIT,$0-4
	MOVW	state+0(FP), R1
	SYNC
	MOVW	R0, (R1)
	SYNC
	RET
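// The LL/SC retry loops above all share one shape. As a rough Go-level
// sketch (illustrative only; loadLinked and storeConditional are
// hypothetical names, not runtime functions), applying an operation f
// atomically to *ptr looks like:
//
//	for {
//		old := loadLinked(ptr)               // LL
//		if storeConditional(ptr, f(old)) {   // SC; fails if *ptr was
//			break                        // written since the LL
//		}
//	}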