github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/asm_mips64x.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips64 mips64le

#include "textflag.h"

// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1
	MOVW	old+8(FP), R2
	MOVW	new+12(FP), R5
	SYNC
cas_again:
	MOVV	R5, R3
	LL	(R1), R4
	BNE	R2, R4, cas_fail
	SC	R3, (R1)
	BEQ	R3, cas_again
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)
	SYNC
	RET
cas_fail:
	MOVV	$0, R1
	JMP	-4(PC)

// bool cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1
	MOVV	old+8(FP), R2
	MOVV	new+16(FP), R5
	SYNC
cas64_again:
	MOVV	R5, R3
	LLV	(R1), R4
	BNE	R2, R4, cas64_fail
	SCV	R3, (R1)
	BEQ	R3, cas64_again
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)
	SYNC
	RET
cas64_fail:
	MOVV	$0, R1
	JMP	-4(PC)

TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP	runtime∕internal∕atomic·Cas64(SB)

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	delta+8(FP), R3
	SYNC
	LL	(R2), R1
	ADDU	R1, R3, R4
	MOVV	R4, R1
	SC	R4, (R2)
	BEQ	R4, -4(PC)
	MOVW	R1, ret+16(FP)
	SYNC
	RET

TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	delta+8(FP), R3
	SYNC
	LLV	(R2), R1
	ADDVU	R1, R3, R4
	MOVV	R4, R1
	SCV	R4, (R2)
	BEQ	R4, -4(PC)
	MOVV	R1, ret+16(FP)
	SYNC
	RET

TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	new+8(FP), R5

	SYNC
	MOVV	R5, R3
	LL	(R2), R1
	SC	R3, (R2)
	BEQ	R3, -3(PC)
	MOVW	R1, ret+16(FP)
	SYNC
	RET

TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	new+8(FP), R5

	SYNC
	MOVV	R5, R3
	LLV	(R2), R1
	SCV	R3, (R2)
	BEQ	R3, -3(PC)
	MOVV	R1, ret+16(FP)
	SYNC
	RET

TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET
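// Note on the pattern used above: each read-modify-write entry point
// (Cas, Cas64, Xadd, Xadd64, Xchg, Xchg64) is built from the same
// LL/SC loop. LL (load-linked; LLV for 64 bits) reads the word and arms
// a reservation on it; SC (store-conditional) writes back only if
// nothing else touched the location in between, and leaves 0 in its
// source register on failure, which is what the BEQ back into the loop
// tests. The SYNC instructions bracketing the loop make the whole
// operation a full memory barrier, and the plain stores (Store, Store8,
// Store64) use the same SYNC/store/SYNC bracketing. A sketch of the
// loop's shape in Go-like pseudocode (loadLinked and storeConditional
// are illustrative names for the LL/SC instructions, not real runtime
// APIs):
//
//	func Xadd(ptr *uint32, delta int32) uint32 {
//		for {
//			old := loadLinked(ptr) // LL: load and arm reservation
//			new := old + uint32(delta)
//			if storeConditional(ptr, new) { // SC succeeded
//				return new
//			}
//			// SC failed (reservation lost); retry.
//		}
//	}
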
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVB	val+8(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1
	MOVV	val+8(FP), R2
	SYNC
	MOVV	R2, 0(R1)
	SYNC
	RET

// void Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC
	LL	(R3), R4
	OR	R2, R4
	SC	R4, (R3)
	BEQ	R4, -4(PC)
	SYNC
	RET

// void And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian. ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2
	SLLV	R4, R5
	NOR	R0, R5
	OR	R5, R2

	SYNC
	LL	(R3), R4
	AND	R2, R4
	SC	R4, (R3)
	BEQ	R4, -4(PC)
	SYNC
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	OR	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)
	SYNC
	RET

// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2

	SYNC
	LL	(R1), R3
	AND	R2, R3
	SC	R3, (R1)
	BEQ	R3, -4(PC)
	SYNC
	RET
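
// Note on Or8/And8 above: MIPS64 has no byte-sized LL/SC, so both
// functions do their read-modify-write on the aligned 32-bit word that
// contains the byte. The pointer is rounded down to a 4-byte boundary,
// the byte's offset within the word (XORed with 3 on big-endian
// mips64, where byte 0 is the most significant byte) selects a shift,
// and the 8-bit value is widened into a 32-bit operand: Or uses
// val << shift, while And uses (val << shift) | ^(0xFF << shift) so
// the other three bytes pass through the AND unchanged. A sketch of
// the operand computation in Go-like pseudocode (p and v are
// illustrative names, not part of this file):
//
//	word := uintptr(p) &^ 3 // aligned word address
//	off := uintptr(p) & 3   // byte offset; (uintptr(p)^3)&3 on big endian
//	orOperand := uint32(v) << (off * 8)
//	andOperand := uint32(v)<<(off*8) | ^(uint32(0xFF) << (off * 8))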