// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// ARM64 (AArch64) atomic primitives for the Go runtime, in Go's Plan 9
// assembler syntax. Loads use LDAR* (load-acquire) and stores use STLR*
// (store-release); read-modify-write operations are built from
// LDAXR/STLXR load-exclusive / store-exclusive retry loops, so every
// atomic here carries acquire-release ordering.

#include "textflag.h"

// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0		// 32-bit load-acquire
	MOVW	R0, ret+8(FP)
	RET

// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0		// 64-bit load-acquire
	MOVD	R0, ret+8(FP)
	RET

// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0		// pointer-sized load-acquire
	MOVD	R0, ret+8(FP)
	RET

// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	// Plain Load is already an acquire load on arm64.
	B	·Load(SB)

TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	// Pointer store without a write barrier; same code as Store64.
	B	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	// Plain Store is already a release store on arm64.
	B	runtime∕internal∕atomic·Store(SB)

TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)		// 32-bit store-release
	RET

TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)		// 64-bit store-release
	RET

// uint32 runtime∕internal∕atomic·Xchg(uint32 *ptr, uint32 new)
// Atomically stores new into *ptr and returns the previous value.
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
	LDAXRW	(R0), R2		// load-acquire exclusive: R2 = old value
	STLXRW	R1, (R0), R3		// store-release exclusive: R3 = 0 on success
	CBNZ	R3, again		// lost exclusivity — retry
	MOVW	R2, ret+16(FP)
	RET

// uint64 runtime∕internal∕atomic·Xchg64(uint64 *ptr, uint64 new)
// 64-bit variant of Xchg.
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
	LDAXR	(R0), R2		// load-acquire exclusive: R2 = old value
	STLXR	R1, (R0), R3		// store-release exclusive: R3 = 0 on success
	CBNZ	R3, again		// lost exclusivity — retry
	MOVD	R2, ret+16(FP)
	RET

// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
again:
	LDAXR	(R0), R3		// load-acquire exclusive
	CMP	R1, R3
	BNE	ok			// mismatch: fall through with NE set, result 0
	STLXR	R2, (R0), R3		// attempt store; R3 = 0 on success
	CBNZ	R3, again		// store-exclusive failed — retry
ok:
	// Flags still hold the CMP result (STLXR/CBNZ do not touch flags).
	CSET	EQ, R0			// R0 = 1 iff the compare matched
	MOVB	R0, ret+24(FP)
	RET

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
	LDAXRW	(R0), R2		// load-acquire exclusive
	ADDW	R2, R1, R2		// R2 = old + delta
	STLXRW	R2, (R0), R3		// store-release exclusive
	CBNZ	R3, again		// lost exclusivity — retry
	MOVW	R2, ret+16(FP)		// return the new value
	RET

// uint64 runtime∕internal∕atomic·Xadd64(uint64 *ptr, int64 delta)
// 64-bit variant of Xadd; returns the new value.
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
	LDAXR	(R0), R2		// load-acquire exclusive
	ADD	R2, R1, R2		// R2 = old + delta
	STLXR	R2, (R0), R3		// store-release exclusive
	CBNZ	R3, again		// lost exclusivity — retry
	MOVD	R2, ret+16(FP)		// return the new value
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	// uintptr is 64-bit on arm64.
	B	runtime∕internal∕atomic·Xchg64(SB)

// void runtime∕internal∕atomic·And8(uint8 *ptr, uint8 val)
// Atomically: *ptr &= val.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2		// load-acquire exclusive byte
	AND	R1, R2
	STLXRB	R2, (R0), R3		// store-release exclusive byte
	CBNZ	R3, -3(PC)		// retry from LDAXRB on failure
	RET

// void runtime∕internal∕atomic·Or8(uint8 *ptr, uint8 val)
// Atomically: *ptr |= val.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2		// load-acquire exclusive byte
	ORR	R1, R2
	STLXRB	R2, (R0), R3		// store-release exclusive byte
	CBNZ	R3, -3(PC)		// retry from LDAXRB on failure
	RET