github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/atomic_arm64.s

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

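// The atomics in this file use the ARMv8 load-acquire/store-release
// instructions (LDAR*, STLR*) and, for read-modify-write operations,
// load-/store-exclusive pairs (LDAXR*, STLXR*) in retry loops. Pairing
// acquire loads with release stores yields the sequentially consistent
// behavior the runtime expects, without explicit DMB barriers.
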
// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
TEXT ·Load(SB),NOSPLIT,$0-12
	MOVD	ptr+0(FP), R0
	LDARW	(R0), R0
	MOVW	R0, ret+8(FP)
	RET

// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* addr)
TEXT ·Load8(SB),NOSPLIT,$0-9
	MOVD	ptr+0(FP), R0
	LDARB	(R0), R0
	MOVB	R0, ret+8(FP)
	RET

// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
TEXT ·Loadp(SB),NOSPLIT,$0-16
	MOVD	ptr+0(FP), R0
	LDAR	(R0), R0
	MOVD	R0, ret+8(FP)
	RET

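// The plain loads above already have acquire semantics (they use LDAR*),
// so the LoadAcq variants can simply branch to them.
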
// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* addr)
TEXT ·LoadAcq(SB),NOSPLIT,$0-12
	B	·Load(SB)

// uint64 runtime∕internal∕atomic·LoadAcq64(uint64 volatile* addr)
TEXT ·LoadAcq64(SB),NOSPLIT,$0-16
	B	·Load64(SB)

// uintptr runtime∕internal∕atomic·LoadAcquintptr(uintptr volatile* addr)
TEXT ·LoadAcquintptr(SB),NOSPLIT,$0-16
	B	·Load64(SB)

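// StorepNoWB performs *ptr = val atomically, without a write barrier;
// callers must ensure no barrier is needed. The StoreRel* variants alias
// the plain stores below, since STLR already provides release semantics.
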
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	B	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	B	runtime∕internal∕atomic·Store(SB)

TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
	B	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
	B	runtime∕internal∕atomic·Store64(SB)

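// func Store(ptr *uint32, val uint32)
// func Store8(ptr *uint8, val uint8)
// func Store64(ptr *uint64, val uint64)
// STLR* is a store-release: all loads and stores earlier in program order
// are observed before the store itself becomes visible.
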
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	STLRW	R1, (R0)
	RET

TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	STLRB	R1, (R0)
	RET

TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0
	MOVD	val+8(FP), R1
	STLR	R1, (R0)
	RET

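// func Xchg(ptr *uint32, new uint32) uint32
// func Xchg64(ptr *uint64, new uint64) uint64
// In the exchange loops below, LDAXR performs a load-acquire and claims
// exclusive access to the address; STLXR is the matching store, which
// writes 0 to its status register (R3 here) only if exclusivity was
// retained. CBNZ retries until the store succeeds; the old value is
// returned.
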
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	new+8(FP), R1
again:
	LDAXRW	(R0), R2
	STLXRW	R1, (R0), R3
	CBNZ	R3, again
	MOVW	R2, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	new+8(FP), R1
again:
	LDAXR	(R0), R2
	STLXR	R1, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

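// Implementation note for Cas64 below: on mismatch, BNE branches straight
// to the ok label, where CSET EQ materializes 0. The CMP result survives
// the exclusive store because STLXR reports success through a status
// register (R3 here) rather than the condition flags, and CBNZ does not
// touch the flags either, so CSET EQ yields 1 exactly when the compare
// matched and the store succeeded.
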
// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//      if(*ptr == old){
//              *ptr = new;
//              return 1;
//      } else {
//              return 0;
//      }
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0
	MOVD	old+8(FP), R1
	MOVD	new+16(FP), R2
again:
	LDAXR	(R0), R3
	CMP	R1, R3
	BNE	ok
	STLXR	R2, (R0), R3
	CBNZ	R3, again
ok:
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)
	RET

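// Xadd and Xadd64 below return the new value, not the old one; the add is
// performed inside the exclusive load/store loop.
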
// uint32 runtime∕internal∕atomic·Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//      *ptr += delta;
//      return *ptr;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R0
	MOVW	delta+8(FP), R1
again:
	LDAXRW	(R0), R2
	ADDW	R2, R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, again
	MOVW	R2, ret+16(FP)
	RET

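// func Xadd64(ptr *uint64, delta int64) uint64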
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R0
	MOVD	delta+8(FP), R1
again:
	LDAXR	(R0), R2
	ADD	R2, R1, R2
	STLXR	R2, (R0), R3
	CBNZ	R3, again
	MOVD	R2, ret+16(FP)
	RET

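// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
// uintptr is 64 bits wide on arm64, so Xchguintptr can simply reuse Xchg64.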
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	B	runtime∕internal∕atomic·Xchg64(SB)

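// And8 and Or8 below are read-modify-write loops on a single byte. The
// CBNZ R3, -3(PC) form branches back three instructions, to the LDAXRB,
// retrying until STLXRB reports success (R3 == 0). Unlike Xadd, these
// routines return nothing.
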
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	AND	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET

TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0
	MOVB	val+8(FP), R1
	LDAXRB	(R0), R2
	ORR	R1, R2
	STLXRB	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET

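// And and Or apply the same exclusive-loop pattern to 32-bit words.
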
// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	LDAXRW	(R0), R2
	AND	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0
	MOVW	val+8(FP), R1
	LDAXRW	(R0), R2
	ORR	R1, R2
	STLXRW	R2, (R0), R3
	CBNZ	R3, -3(PC)
	RET