// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"
#include "funcdata.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

TEXT ·Casuintptr(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-12
	JMP	·Xadd(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-12
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-20
	JMP	·Xadd64(SB)

// bool ·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == *old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-21
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVL	old_lo+4(FP), AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)
	RET

// bool Casp1(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
TEXT ·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT ·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+8(FP)
	RET

TEXT ·Xadd64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XADDQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// DI:SI = delta
	MOVL	delta_lo+4(FP), SI
	MOVL	delta_hi+8(FP), DI
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
addloop:
	// CX:BX = DX:AX (*addr) + DI:SI (delta)
	MOVL	AX, BX
	MOVL	DX, CX
	ADDL	SI, BX
	ADCL	DI, CX

	// if *addr == DX:AX {
	//	*addr = CX:BX
	// } else {
	//	DX:AX = *addr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)

	JNZ	addloop

	// success
	// return CX:BX
	MOVL	BX, ret_lo+12(FP)
	MOVL	CX, ret_hi+16(FP)
	RET

TEXT ·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+8(FP)
	RET

TEXT ·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	·Xchg(SB)

TEXT ·Xchg64(SB),NOSPLIT,$0-20
	NO_LOCAL_POINTERS
	// no XCHGQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// CX:BX = new
	MOVL	new_lo+4(FP), BX
	MOVL	new_hi+8(FP), CX
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
swaploop:
	// if *addr == DX:AX
	//	*addr = CX:BX
	// else
	//	DX:AX = *addr
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	swaploop

	// success
	// return DX:AX
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

TEXT ·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT ·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT ·StoreRel(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)

// uint64 atomicload64(uint64 volatile* addr);
TEXT ·Load64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVQ	(AX), M0
	MOVQ	M0, ret+4(FP)
	EMMS
	RET

// void ·Store64(uint64 volatile* addr, uint64 v);
TEXT ·Store64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// MOVQ and EMMS were introduced on the Pentium MMX.
	MOVQ	val+4(FP), M0
	MOVQ	M0, (AX)
	EMMS
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	XORL	AX, AX
	LOCK
	XADDL	AX, (SP)
	RET

// void ·Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// void ·And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), BX
	MOVB	val+4(FP), AX
	XCHGB	AX, 0(BX)
	RET

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ORL	BX, (AX)
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ANDL	BX, (AX)
	RET