github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/atomic_riscv64.s

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// RISC-V's atomic operations have two bits, aq ("acquire") and rl ("release"),
// which may be toggled on and off. Their precise semantics are defined in
// section 6.3 of the specification, but the basic idea is as follows:
//
//   - If neither aq nor rl is set, the CPU may reorder the atomic arbitrarily.
//     It guarantees only that it will execute atomically.
//
//   - If aq is set, the CPU may move the instruction backward, but not forward.
//
//   - If rl is set, the CPU may move the instruction forward, but not backward.
//
//   - If both are set, the CPU may not reorder the instruction at all.
//
// These four modes correspond to well-known memory orderings on other CPUs.
// On ARM, aq corresponds to a dmb ishst, and aq+rl corresponds to a dmb ish.
// On Intel, aq corresponds to an lfence, rl to an sfence, and aq+rl to an
// mfence (or a lock prefix).
//
// Go's memory model requires that
//   - if a read happens after a write, the read must observe the write, and
//   - if a read happens concurrently with a write, the read may observe the
//     write.
// aq is sufficient to guarantee this, so that's what we use here. (This jibes
// with ARM, which uses dmb ishst.)

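// For intuition, the requirement above is what the classic message-passing
// pattern needs. A minimal sketch (the variable names are hypothetical, and
// atomic stands for this package's Load/Store):
//
//	// goroutine 1:
//	data = 42
//	atomic.Store(&ready, 1)
//
//	// goroutine 2:
//	for atomic.Load(&ready) == 0 {
//	}
//	use(data) // must observe data == 42
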
#include "textflag.h"

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//      if(*ptr == old){
//              *ptr = new;
//              return 1;
//      } else {
//              return 0;
//      }

TEXT ·Cas(SB), NOSPLIT, $0-17
	MOV	ptr+0(FP), A0
	MOVW	old+8(FP), A1
	MOVW	new+12(FP), A2
cas_again:
	LRW	(A0), A3		// load-reserved: read *ptr
	BNE	A3, A1, cas_fail
	SCW	A2, (A0), A4		// store-conditional: A4 != 0 if the reservation was lost
	BNE	A4, ZERO, cas_again	// retry on store-conditional failure
	MOV	$1, A0
	MOVB	A0, ret+16(FP)
	RET
cas_fail:
	MOVB	ZERO, ret+16(FP)
	RET

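// For reference, a typical (hypothetical) caller-side retry loop built on
// Cas, of the kind used throughout the runtime:
//
//	for {
//		old := atomic.Load(&n)
//		if atomic.Cas(&n, old, old+1) {
//			break
//		}
//	}
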
// func Cas64(ptr *uint64, old, new uint64) bool
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOV	ptr+0(FP), A0
	MOV	old+8(FP), A1
	MOV	new+16(FP), A2
cas_again:
	LRD	(A0), A3
	BNE	A3, A1, cas_fail
	SCD	A2, (A0), A4
	BNE	A4, ZERO, cas_again
	MOV	$1, A0
	MOVB	A0, ret+24(FP)
	RET
cas_fail:
	MOVB	ZERO, ret+24(FP)
	RET

// func Load(ptr *uint32) uint32
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
	MOV	ptr+0(FP), A0
	LRW	(A0), A0
	MOVW	A0, ret+8(FP)
	RET

// func Load8(ptr *uint8) uint8
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOV	ptr+0(FP), A0
	FENCE			// no byte-sized LR, so bracket a plain load with fences
	MOVBU	(A0), A1
	FENCE
	MOVB	A1, ret+8(FP)
	RET

// func Load64(ptr *uint64) uint64
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOV	ptr+0(FP), A0
	LRD	(A0), A0
	MOV	A0, ret+8(FP)
	RET

// func Store(ptr *uint32, val uint32)
TEXT ·Store(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOSWAPW A1, (A0), ZERO	// swap discards the old value: an atomic store
	RET

// func Store8(ptr *uint8, val uint8)
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	FENCE			// no byte-sized AMO, so bracket a plain store with fences
	MOVB	A1, (A0)
	FENCE
	RET

// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOV	ptr+0(FP), A0
	MOV	val+8(FP), A1
	AMOSWAPD A1, (A0), ZERO	// swap discards the old value: an atomic store
	RET

TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB),NOSPLIT,$0-25
	JMP	·Cas64(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-16
	JMP	·Store64(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-16
	JMP	·Loaduintptr(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-16
	JMP	·Loaduintptr(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-24
	MOV	ptr+0(FP), A0
	MOV	delta+8(FP), A1
	AMOADDD A1, (A0), A0	// A0 = old value
	ADD	A0, A1, A0	// return the new value, old + delta
	MOV	A0, ret+16(FP)	// int64 result is 8 bytes
	RET

TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
	JMP	·Load(SB)

TEXT ·LoadAcq64(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)

TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP	·Load64(SB)

// func Loadp(ptr unsafe.Pointer) unsafe.Pointer
TEXT ·Loadp(SB),NOSPLIT,$0-16
	JMP	·Load64(SB)

// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)

TEXT ·StoreRel64(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)

// func Xchg(ptr *uint32, new uint32) uint32
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	new+8(FP), A1
	AMOSWAPW A1, (A0), A1	// A1 = old value
	MOVW	A1, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	new+8(FP), A1
	AMOSWAPD A1, (A0), A1	// A1 = old value
	MOV	A1, ret+16(FP)
	RET

// Atomically:
//      *ptr += delta;
//      return *ptr;

// func Xadd(ptr *uint32, delta int32) uint32
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOV	ptr+0(FP), A0
	MOVW	delta+8(FP), A1
	AMOADDW A1, (A0), A2	// A2 = old value
	ADD	A2, A1, A0	// return the new value, old + delta
	MOVW	A0, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOV	ptr+0(FP), A0
	MOV	delta+8(FP), A1
	AMOADDD A1, (A0), A2	// A2 = old value
	ADD	A2, A1, A0	// return the new value, old + delta
	MOV	A0, ret+16(FP)
	RET

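// A typical (hypothetical) use of the returned new value is reference
// counting, where whoever drops the count to zero frees the object:
//
//	if atomic.Xadd(&obj.refs, -1) == 0 {
//		free(obj)
//	}
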
// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)

// func And8(ptr *uint8, val uint8)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit offset of the byte lane
	XOR	$255, A1	// A1 = ^val (low 8 bits)
	SLL	A2, A1		// A1 = ^val shifted into its lane
	XOR	$-1, A1		// A1 = val in its lane, 1s elsewhere
	AMOANDW A1, (A0), ZERO
	RET

// func Or8(ptr *uint8, val uint8)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOV	ptr+0(FP), A0
	MOVBU	val+8(FP), A1
	AND	$3, A0, A2	// A2 = byte offset within the word
	AND	$-4, A0		// A0 = word-aligned address
	SLL	$3, A2		// A2 = bit offset of the byte lane
	SLL	A2, A1		// A1 = val in its lane, 0s elsewhere
	AMOORW	A1, (A0), ZERO
	RET

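// For intuition, the equivalent And8 mask construction in Go (an illustrative
// sketch, not part of this package; RISC-V is little-endian, so byte n of a
// word occupies bits 8n..8n+7):
//
//	func and8Mask(addr uintptr, val uint8) (word uintptr, mask uint32) {
//		shift := (addr & 3) * 8         // bit offset of the byte lane
//		word = addr &^ 3                // word-aligned address
//		mask = ^(uint32(^val) << shift) // val in its lane, 1s elsewhere
//		return
//	}
//
// For example, addr&3 == 2 and val == 0xF0 give mask == 0xFFF0FFFF, so the
// AMOANDW touches only byte 2 of the word.
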
// func And(ptr *uint32, val uint32)
TEXT ·And(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOANDW	A1, (A0), ZERO
	RET

// func Or(ptr *uint32, val uint32)
TEXT ·Or(SB), NOSPLIT, $0-12
	MOV	ptr+0(FP), A0
	MOVW	val+8(FP), A1
	AMOORW	A1, (A0), ZERO
	RET