github.com/c12o16h1/go/src@v0.0.0-20200114212001-5a151c0f00ed/runtime/internal/atomic/asm_mipsx.s

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips mipsle

#include "textflag.h"

TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3
	LL	(R1), R4	// R4 = *R1
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, try_cas
	SYNC
	MOVB	R3, ret+12(FP)
	RET
cas_fail:
	MOVB	R0, ret+12(FP)
	RET

TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

TEXT ·Store8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVB	val+4(FP), R2
	SYNC
	MOVB	R2, 0(R1)
	SYNC
	RET

TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

TEXT ·Load8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	SYNC
	MOVB	0(R1), R1
	SYNC
	MOVB	R1, ret+4(FP)
	RET

TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4
	MOVW	R4, R1
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd
	SYNC
	MOVW	R1, ret+8(FP)
	RET

TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3
	LL	(R2), R1	// R1 = *R2
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg
	SYNC
	MOVW	R1, ret+8(FP)
	RET

TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$-0-8
	JMP	·Load(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·StoreRel(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

// void	Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8
	SYNC
	RET

// void	And8(byte volatile*, byte);
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	MOVW	$0xFF, R5
	SLL	R4, R2
	SLL	R4, R5
	NOR	R0, R5
	OR	R5, R2	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8
	SYNC
	RET