github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/asm_ppc64x.s (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build ppc64 ppc64le
     6  
     7  #include "textflag.h"
     8  
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Sequentially-consistent CAS: LWSYNC before the loop orders prior
// stores ahead of the CAS; LWSYNC on the success path prevents later
// accesses from moving above it.
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	LWSYNC
cas_again:
	LWAR	(R3), R6	// load word and set reservation on *ptr
	CMPW	R6, R4
	BNE	cas_fail	// current value != old: fail without storing
	STWCCC	R5, (R3)	// store-conditional: succeeds only if reservation held
	BNE	cas_again	// reservation lost (contention) — retry
	MOVD	$1, R3
	LWSYNC
	MOVB	R3, ret+16(FP)	// return true
	RET
cas_fail:
	MOVB	R0, ret+16(FP)	// R0 is fixed zero in Go ppc64 asm — return false
	RET
    34  
// bool	runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
//
// 64-bit variant of Cas: same barrier placement, doubleword
// load-reserve/store-conditional (LDAR/STDCCC).
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	LWSYNC
cas64_again:
	LDAR	(R3), R6	// load doubleword and reserve *ptr
	CMP	R6, R4
	BNE	cas64_fail	// mismatch: fail without storing
	STDCCC	R5, (R3)	// conditional store; fails if reservation lost
	BNE	cas64_again	// lost reservation — retry
	MOVD	$1, R3
	LWSYNC
	MOVB	R3, ret+24(FP)	// return true
	RET
cas64_fail:
	MOVB	R0, ret+24(FP)	// R0 is fixed zero — return false
	RET
    61  
// Release-ordered CAS32: LWSYNC before the loop gives release
// semantics; unlike Cas there is no trailing barrier on success.
// Labels are scoped to the TEXT symbol, so reusing cas_again/cas_fail
// does not clash with Cas above.
TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
	MOVD    ptr+0(FP), R3
	MOVWZ   old+8(FP), R4
	MOVWZ   new+12(FP), R5
	LWSYNC
cas_again:
	LWAR    (R3), $0, R6        // 0 = Mutex release hint
	CMPW    R6, R4
	BNE     cas_fail	// mismatch: fail without storing
	STWCCC  R5, (R3)	// conditional store; fails if reservation lost
	BNE     cas_again	// lost reservation — retry
	MOVD    $1, R3
	MOVB    R3, ret+16(FP)	// return true
	RET
cas_fail:
	MOVB    R0, ret+16(FP)	// R0 is fixed zero — return false
	RET
    79  
// uintptr is 64 bits on ppc64/ppc64le, so Casuintptr is Cas64.
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
	BR	runtime∕internal∕atomic·Cas64(SB)
    82  
// uintptr is 64 bits on ppc64/ppc64le, so Loaduintptr is Load64.
TEXT runtime∕internal∕atomic·Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)
    85  
// Acquire-ordered uintptr load; aliases the 64-bit acquire load.
TEXT runtime∕internal∕atomic·LoadAcquintptr(SB),  NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·LoadAcq64(SB)
    88  
// uint is 64 bits on ppc64/ppc64le, so Loaduint is Load64.
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)
    91  
// uintptr is 64 bits on ppc64/ppc64le, so Storeuintptr is Store64.
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)
    94  
// Release-ordered uintptr store; aliases the 64-bit release store.
TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·StoreRel64(SB)
    97  
// uintptr is 64 bits on ppc64/ppc64le, so Xadduintptr is Xadd64.
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)
   100  
// int64 and uint64 share representation, so Loadint64 is Load64.
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)
   103  
// Two's-complement addition is sign-agnostic, so Xaddint64 is Xadd64.
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)
   106  
// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Pointers are 64-bit here, so pointer CAS is Cas64. Write barriers,
// if needed, are the caller's responsibility (hence the "1" suffix).
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
	BR runtime∕internal∕atomic·Cas64(SB)
   116  
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	LWSYNC
	LWAR	(R4), R3	// load word and reserve *ptr
	ADD	R5, R3		// R3 = old + delta
	STWCCC	R3, (R4)	// conditional store of the sum
	BNE	-3(PC)		// reservation lost — retry from LWAR
	MOVW	R3, ret+16(FP)	// return the new value
	RET
   131  
// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically: *ptr += delta; return *ptr.
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	LWSYNC
	LDAR	(R4), R3	// load doubleword and reserve *ptr
	ADD	R5, R3		// R3 = old + delta
	STDCCC	R3, (R4)	// conditional store of the sum
	BNE	-3(PC)		// reservation lost — retry from LDAR
	MOVD	R3, ret+16(FP)	// return the new value
	RET
   142  
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically: old = *ptr; *ptr = new; return old.
// LWSYNC before gives release ordering; ISYNC after gives acquire.
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R5
	LWSYNC
	LWAR	(R4), R3	// load old value, reserve *ptr
	STWCCC	R5, (R4)	// conditionally store new value
	BNE	-2(PC)		// reservation lost — retry from LWAR
	ISYNC
	MOVW	R3, ret+16(FP)	// return old value
	RET
   153  
// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// Atomically: old = *ptr; *ptr = new; return old.
// Same barrier scheme as Xchg, doubleword-sized.
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R5
	LWSYNC
	LDAR	(R4), R3	// load old value, reserve *ptr
	STDCCC	R5, (R4)	// conditionally store new value
	BNE	-2(PC)		// reservation lost — retry from LDAR
	ISYNC
	MOVD	R3, ret+16(FP)	// return old value
	RET
   164  
// uintptr is 64 bits on ppc64/ppc64le, so Xchguintptr is Xchg64.
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xchg64(SB)
   167  
   168  
// Pointer store without a write barrier ("NoWB"); the pointer is
// 64-bit, so this is just Store64.
TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)
   171  
// void Store(uint32 *ptr, uint32 val)
// SYNC (heavyweight full barrier) before the store gives
// sequentially-consistent store semantics.
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	SYNC
	MOVW	R4, 0(R3)
	RET
   178  
// void Store8(uint8 *ptr, uint8 val)
// Byte-sized sequentially-consistent store (full SYNC before).
TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVB	val+8(FP), R4
	SYNC
	MOVB	R4, 0(R3)
	RET
   185  
// void Store64(uint64 *ptr, uint64 val)
// Doubleword sequentially-consistent store (full SYNC before).
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R3
	MOVD	val+8(FP), R4
	SYNC
	MOVD	R4, 0(R3)
	RET
   192  
// void StoreRel(uint32 *ptr, uint32 val)
// Release store: the lighter LWSYNC (not SYNC) orders prior
// accesses before the store.
TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LWSYNC
	MOVW	R4, 0(R3)
	RET
   199  
// void StoreRel64(uint64 *ptr, uint64 val)
// 64-bit release store (LWSYNC before the store).
TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R3
	MOVD	val+8(FP), R4
	LWSYNC
	MOVD	R4, 0(R3)
	RET
   206  
// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
// Atomically: *ptr |= val. No return value; LWSYNC gives release
// ordering on entry.
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	LWSYNC
again:
	LBAR	(R3), R6	// load byte and reserve *ptr
	OR	R4, R6
	STBCCC	R6, (R3)	// conditionally store OR-ed byte
	BNE	again		// reservation lost — retry
	RET
   218  
// void runtime∕internal∕atomic·And8(byte volatile*, byte);
// Atomically: *ptr &= val. No return value; LWSYNC gives release
// ordering on entry.
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	LWSYNC
again:
	LBAR	(R3), R6	// load byte and reserve *ptr
	AND	R4, R6
	STBCCC	R6, (R3)	// conditionally store AND-ed byte
	BNE	again		// reservation lost — retry
	RET
   230  
// func Or(addr *uint32, v uint32)
// Atomically: *addr |= v (32-bit variant of Or8).
TEXT runtime∕internal∕atomic·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LWSYNC
again:
	LWAR	(R3), R6	// load word and reserve *addr
	OR	R4, R6
	STWCCC	R6, (R3)	// conditionally store OR-ed word
	BNE	again		// reservation lost — retry
	RET
   242  
// func And(addr *uint32, v uint32)
// Atomically: *addr &= v (32-bit variant of And8).
TEXT runtime∕internal∕atomic·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LWSYNC
again:
	LWAR	(R3),R6		// load word and reserve *addr
	AND	R4, R6
	STWCCC	R6, (R3)	// conditionally store AND-ed word
	BNE	again		// reservation lost — retry
	RET