github.com/aloncn/graphics-go@v0.0.1/src/runtime/internal/atomic/asm_ppc64x.s

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64 ppc64le

#include "textflag.h"

// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
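	// LWAR/STWCCC (lwarx/stwcx.) form a load-reserve/store-conditional
	// pair: the conditional store succeeds only while the reservation
	// taken by the load is still held; otherwise CR0[EQ] is clear and
	// the BNE retries the loop. The SYNC before the loop and the
	// SYNC/ISYNC on the way out order the CAS against surrounding
	// memory accesses.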
cas_again:
	SYNC
	LWAR	(R3), R6
	CMPW	R6, R4
	BNE	cas_fail
	STWCCC	R5, (R3)
	BNE	cas_again
	MOVD	$1, R3
	SYNC
	ISYNC
	MOVB	R3, ret+16(FP)
	RET
cas_fail:
	MOVD	$0, R3
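	// -5(PC) branches back to the SYNC above, so the failure path runs
	// the same SYNC/ISYNC sequence and then stores the 0 in R3 via the
	// MOVB/RET.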
	BR	-5(PC)

// bool	runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
cas64_again:
	SYNC
	LDAR	(R3), R6
	CMP	R6, R4
	BNE	cas64_fail
	STDCCC	R5, (R3)
	BNE	cas64_again
	MOVD	$1, R3
	SYNC
	ISYNC
	MOVD	R3, ret+24(FP)
	RET
cas64_fail:
	MOVD	$0, R3
	BR	-5(PC)

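// uintptr and unsafe.Pointer are 64 bits wide on ppc64/ppc64le, and int64
// shares its representation with uint64, so the wrappers below simply
// tail-call the corresponding 64-bit routines (Cas64, Load64, Store64,
// Xadd64).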
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
	BR	runtime∕internal∕atomic·Cas64(SB)

TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)

TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)

TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Load64(SB)

TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xadd64(SB)

// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
	BR	runtime∕internal∕atomic·Cas64(SB)

// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	SYNC
	LWAR	(R4), R3
	ADD	R5, R3
	STWCCC	R3, (R4)
	BNE	-4(PC)
	SYNC
	ISYNC
	MOVW	R3, ret+16(FP)
	RET

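// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;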
TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	SYNC
	LDAR	(R4), R3
	ADD	R5, R3
	STDCCC	R3, (R4)
	BNE	-4(PC)
	SYNC
	ISYNC
	MOVD	R3, ret+16(FP)
	RET

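// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;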
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R5
	SYNC
	LWAR	(R4), R3
	STWCCC	R5, (R4)
	BNE	-3(PC)
	SYNC
	ISYNC
	MOVW	R3, ret+16(FP)
	RET

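// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;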
TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R5
	SYNC
	LDAR	(R4), R3
	STDCCC	R5, (R4)
	BNE	-3(PC)
	SYNC
	ISYNC
	MOVD	R3, ret+16(FP)
	RET

TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
	BR	runtime∕internal∕atomic·Xchg64(SB)

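// void Storep1(void **ptr, void *val)
// Store val into *ptr without a write barrier. Pointers are 64 bits wide
// here, so this tail-calls Store64.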
TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-16
	BR	runtime∕internal∕atomic·Store64(SB)

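// void Store(uint32 volatile *ptr, uint32 val)
// Atomically:
//	*ptr = val;
// The SYNC orders all earlier memory accesses before the store.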
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	SYNC
	MOVW	R4, 0(R3)
	RET

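// void Store64(uint64 volatile *ptr, uint64 val)
// Atomically:
//	*ptr = val;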
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R3
	MOVD	val+8(FP), R4
	SYNC
	MOVD	R4, 0(R3)
	RET

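// Or8 and And8 below operate on the aligned 32-bit word containing the
// target byte, using a word-sized LWAR/STWCCC loop rather than byte-sized
// reservations. The byte's offset within that word depends on endianness,
// hence the GOARCH_ppc64 adjustment.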
// void	runtime∕internal∕atomic·Or8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	// R5 = (R3 << 0) & ~3
	RLDCR	$0, R3, $~3, R5
	// Compute val shift.
#ifdef GOARCH_ppc64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R3
#endif
	// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
	RLDC	$3, R3, $(3*8), R6
	// Shift val for aligned ptr.  R4 = val << R6
	SLD	R6, R4, R4

again:
	SYNC
	LWAR	(R5), R6
	OR	R4, R6
	STWCCC	R6, (R5)
	BNE	again
	SYNC
	ISYNC
	RET

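// For And8 the 32-bit operand must leave the other three bytes of the
// word unchanged, so it is built as (val << shift) | ^(0xFF << shift):
// val in the target byte and all-ones elsewhere, making the AND a no-op
// outside the target byte.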
// void	runtime∕internal∕atomic·And8(byte volatile*, byte);
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	// R5 = (R3 << 0) & ~3
	RLDCR	$0, R3, $~3, R5
	// Compute val shift.
#ifdef GOARCH_ppc64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R3
#endif
	// R6 = ((ptr & 3) * 8) = (ptr << 3) & (3*8)
	RLDC	$3, R3, $(3*8), R6
	// Shift val for aligned ptr.  R4 = val << R6 | ^(0xFF << R6)
	MOVD	$0xFF, R7
	SLD	R6, R4
	SLD	R6, R7
	XOR	$-1, R7
	OR	R7, R4
again:
	SYNC
	LWAR	(R5), R6
	AND	R4, R6
	STWCCC	R6, (R5)
	BNE	again
	SYNC
	ISYNC
	RET