github.com/c12o16h1/go/src@v0.0.0-20200114212001-5a151c0f00ed/runtime/internal/atomic/atomic_arm64.s

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// void runtime∕internal∕atomic·StorepNoWB(void *volatile *addr, void *val)
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	B	runtime∕internal∕atomic·Store64(SB)

// void runtime∕internal∕atomic·StoreRel(uint32 volatile* addr, uint32 val)
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	B	runtime∕internal∕atomic·Store(SB)

// void runtime∕internal∕atomic·Store(uint32 volatile* addr, uint32 val)
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

// void runtime∕internal∕atomic·Store8(uint8 volatile* addr, uint8 val)
TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 val)
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

// uint32 runtime∕internal∕atomic·Xchg(uint32 volatile* addr, uint32 new)
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, again
	MOVW	R2, ret+16(FP)
	RET

// uint64 runtime∕internal∕atomic·Xchg64(uint64 volatile* addr, uint64 new)
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
again:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, again
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET
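
// Cas64 above and the read-modify-write routines below rely on the ARMv8
// exclusive pair: LDAXR* is a load-acquire that claims exclusive access to
// the address, and STLXR* is a store-release that succeeds only if that
// exclusivity was kept, writing 0 to its status register (R3 here) on
// success and nonzero on failure, in which case CBNZ retries the whole
// sequence. The overall shape, as an illustrative sketch only (not code in
// this package):
//
//	for {
//		v := load-acquire-exclusive(ptr)
//		if store-release-exclusive(ptr, update(v)) {
//			return v
//		}
//	}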

// uint32 runtime∕internal∕atomic·Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, again
	MOVW	R2, ret+16(FP)
	RET

// uint64 runtime∕internal∕atomic·Xadd64(uint64 volatile *ptr, int64 delta)
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

// uintptr runtime∕internal∕atomic·Xchguintptr(uintptr volatile* addr, uintptr new)
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	B	runtime∕internal∕atomic·Xchg64(SB)

// void runtime∕internal∕atomic·And8(uint8 volatile* addr, uint8 val)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET

// void runtime∕internal∕atomic·Or8(uint8 volatile* addr, uint8 val)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET
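
// The Go-side declarations backing these TEXT symbols live elsewhere in the
// package (not in this file). The argument frames above imply signatures
// roughly like the following Go sketch:
//
//	func Load(ptr *uint32) uint32
//	func Load8(ptr *uint8) uint8
//	func Load64(ptr *uint64) uint64
//	func Cas64(ptr *uint64, old, new uint64) bool
//	func Xadd(ptr *uint32, delta int32) uint32
//	func Xadd64(ptr *uint64, delta int64) uint64
//	func Xchg(ptr *uint32, new uint32) uint32
//	func Xchg64(ptr *uint64, new uint64) uint64
//	func And8(ptr *uint8, val uint8)
//	func Or8(ptr *uint8, val uint8)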