github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/internal/atomic/atomic_loong64.s (about)

     1  // Copyright 2022 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "textflag.h"
     6  
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
//
// Implemented as an LL/SC retry loop bracketed by DBAR full fences,
// giving the compare-and-swap sequentially-consistent ordering.
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVW	old+8(FP), R5		// R5 = expected value
	MOVW	new+12(FP), R6		// R6 = replacement value
	DBAR				// full barrier before the LL/SC sequence
cas_again:
	MOVV	R6, R7			// SC clobbers its source register, so copy new each attempt
	LL	(R4), R8		// load-linked: R8 = *ptr
	BNE	R5, R8, cas_fail	// current value != old: fail without storing
	SC	R7, (R4)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, cas_again		// SC failed: retry the whole LL/SC sequence
	MOVV	$1, R4
	MOVB	R4, ret+16(FP)		// return true
	DBAR				// full barrier after the LL/SC sequence
	RET
cas_fail:
	MOVV	$0, R4
	JMP	-4(PC)			// back to the MOVB above: store 0, fence, return false
    32  
// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
//
// 64-bit variant of Cas: same DBAR-fenced LL/SC retry loop, using
// the doubleword LLV/SCV instructions.
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVV	old+8(FP), R5		// R5 = expected value
	MOVV	new+16(FP), R6		// R6 = replacement value
	DBAR				// full barrier before the LL/SC sequence
cas64_again:
	MOVV	R6, R7			// SCV clobbers its source register, so copy new each attempt
	LLV	(R4), R8		// load-linked: R8 = *ptr
	BNE	R5, R8, cas64_fail	// current value != old: fail without storing
	SCV	R7, (R4)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, cas64_again		// SCV failed: retry the whole LL/SC sequence
	MOVV	$1, R4
	MOVB	R4, ret+24(FP)		// return true
	DBAR				// full barrier after the LL/SC sequence
	RET
cas64_fail:
	MOVV	$0, R4
	JMP	-4(PC)			// back to the MOVB above: store 0, fence, return false
    59  
// uintptr is 64 bits on loong64, so Casuintptr is just Cas64.
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)
    62  
// CasRel requires only release ordering; the fully-fenced Cas is
// (more than) strong enough, so tail-call it.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)
    65  
// uintptr is 64 bits on loong64, so Loaduintptr is just Load64.
TEXT ·Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    68  
// uint is 64 bits on loong64, so Loaduint is just Load64.
TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    71  
// uintptr is 64 bits on loong64, so Storeuintptr is just Store64.
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
    74  
// uintptr is 64 bits on loong64, so Xadduintptr is just Xadd64.
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    77  
// int64 and uint64 loads are identical at this level: use Load64.
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)
    80  
// Two's-complement addition is sign-agnostic: use Xadd64.
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    83  
// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Casp1 is the pointer CAS *without* a write barrier (callers must
// arrange any GC write barrier themselves). Pointers are 64 bits,
// so tail-call Cas64 via its fully package-qualified symbol.
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP runtime∕internal∕atomic·Cas64(SB)
    93  
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVW	delta+8(FP), R5		// R5 = delta
	DBAR				// full barrier before the LL/SC sequence
	LL	(R4), R6		// load-linked: R6 = *ptr
	ADDU	R6, R5, R7		// R7 = *ptr + delta
	MOVV	R7, R6			// save the sum in R6; SC clobbers R7
	SC	R7, (R4)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, -4(PC)		// SC failed: retry from the LL above
	MOVW	R6, ret+16(FP)		// return the updated value
	DBAR				// full barrier after the LL/SC sequence
	RET
   110  
// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVV	delta+8(FP), R5		// R5 = delta
	DBAR				// full barrier before the LL/SC sequence
	LLV	(R4), R6		// load-linked: R6 = *ptr
	ADDVU	R6, R5, R7		// R7 = *ptr + delta (64-bit)
	MOVV	R7, R6			// save the sum in R6; SCV clobbers R7
	SCV	R7, (R4)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, -4(PC)		// SCV failed: retry from the LLV above
	MOVV	R6, ret+16(FP)		// return the updated value
	DBAR				// full barrier after the LL/SC sequence
	RET
   123  
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr; *ptr = new; return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVW	new+8(FP), R5		// R5 = new value

	DBAR				// full barrier before the LL/SC sequence
	MOVV	R5, R6			// SC clobbers its source register, so copy new each attempt
	LL	(R4), R7		// load-linked: R7 = old value
	SC	R6, (R4)		// store-conditional: R6 = 1 on success, 0 if reservation lost
	BEQ	R6, -3(PC)		// SC failed: restart at the MOVV (R6 was clobbered)
	MOVW	R7, ret+16(FP)		// return the previous value
	DBAR				// full barrier after the LL/SC sequence
	RET
   136  
// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// Atomically:
//	old = *ptr; *ptr = new; return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVV	new+8(FP), R5		// R5 = new value

	DBAR				// full barrier before the LL/SC sequence
	MOVV	R5, R6			// SCV clobbers its source register, so copy new each attempt
	LLV	(R4), R7		// load-linked: R7 = old value
	SCV	R6, (R4)		// store-conditional: R6 = 1 on success, 0 if reservation lost
	BEQ	R6, -3(PC)		// SCV failed: restart at the MOVV (R6 was clobbered)
	MOVV	R7, ret+16(FP)		// return the previous value
	DBAR				// full barrier after the LL/SC sequence
	RET
   149  
// uintptr is 64 bits on loong64, so Xchguintptr is just Xchg64.
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)
   152  
// StorepNoWB stores a pointer without a GC write barrier
// (callers must guarantee one is not needed): plain Store64 here.
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
   155  
// StoreRel requires only release ordering; the fully-fenced Store
// is (more than) strong enough.
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)
   158  
// Release store of a uintptr: the fully-fenced 64-bit Store suffices.
TEXT ·StoreReluintptr(SB), NOSPLIT, $0-16
	JMP     ·Store64(SB)
   161  
// void Store(uint32 volatile *ptr, uint32 val)
// A plain 32-bit store bracketed by DBAR full fences, making it a
// sequentially-consistent atomic store.
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVW	val+8(FP), R5		// R5 = val
	DBAR				// full barrier before the store
	MOVW	R5, 0(R4)
	DBAR				// full barrier after the store
	RET
   169  
// void Store8(uint8 volatile *ptr, uint8 val)
// A plain byte store bracketed by DBAR full fences.
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVB	val+8(FP), R5		// R5 = val
	DBAR				// full barrier before the store
	MOVB	R5, 0(R4)
	DBAR				// full barrier after the store
	RET
   177  
// void Store64(uint64 volatile *ptr, uint64 val)
// A plain 64-bit store bracketed by DBAR full fences.
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R4		// R4 = ptr
	MOVV	val+8(FP), R5		// R5 = val
	DBAR				// full barrier before the store
	MOVV	R5, 0(R4)
	DBAR				// full barrier after the store
	RET
   185  
// void	Or8(byte volatile*, byte);
//
// There is no byte-wide LL/SC, so the OR is done on the aligned
// 32-bit word containing the byte, with val shifted into position.
// OR-ing zero bits into the neighboring bytes leaves them unchanged.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVBU	val+8(FP), R5
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R6
	AND	R4, R6			// R6 = ptr &^ 3 (address of containing word)
	// R7 = ((ptr & 3) * 8) = bit offset of the byte within the word
	AND	$3, R4, R7
	SLLV	$3, R7
	// Shift val for aligned ptr. R5 = val << R7
	SLLV	R7, R5

	DBAR				// full barrier before the LL/SC sequence
	LL	(R6), R7		// load-linked the containing word
	OR	R5, R7			// set the byte's bits
	SC	R7, (R6)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, -4(PC)		// SC failed: retry (re-enters at the DBAR)
	DBAR				// full barrier after the LL/SC sequence
	RET
   206  
// void	And8(byte volatile*, byte);
//
// As with Or8, the AND is done on the aligned 32-bit word containing
// the byte. The mask keeps the other three bytes intact by setting
// all of their bits to 1.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R4
	MOVBU	val+8(FP), R5
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R6
	AND	R4, R6			// R6 = ptr &^ 3 (address of containing word)
	// R7 = ((ptr & 3) * 8) = bit offset of the byte within the word
	AND	$3, R4, R7
	SLLV	$3, R7
	// Shift val for aligned ptr. R5 = val << R7 | ^(0xFF << R7)
	MOVV	$0xFF, R8
	SLLV	R7, R5
	SLLV	R7, R8
	NOR	R0, R8			// R8 = ^R8 (NOR with the zero register is bitwise NOT)
	OR	R8, R5			// R5 = word-wide AND mask

	DBAR				// full barrier before the LL/SC sequence
	LL	(R6), R7		// load-linked the containing word
	AND	R5, R7			// clear the byte's bits per val
	SC	R7, (R6)		// store-conditional: R7 = 1 on success, 0 if reservation lost
	BEQ	R7, -4(PC)		// SC failed: retry (re-enters at the DBAR)
	DBAR				// full barrier after the LL/SC sequence
	RET
   231  
// func Or(addr *uint32, v uint32)
// Atomically: *addr |= v. Fenced LL/SC read-modify-write.
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4		// R4 = addr
	MOVW	val+8(FP), R5		// R5 = v
	DBAR				// full barrier before the LL/SC sequence
	LL	(R4), R6		// load-linked: R6 = *addr
	OR	R5, R6			// R6 |= v
	SC	R6, (R4)		// store-conditional: R6 = 1 on success, 0 if reservation lost
	BEQ	R6, -4(PC)		// SC failed: retry (re-enters at the DBAR)
	DBAR				// full barrier after the LL/SC sequence
	RET
   243  
// func And(addr *uint32, v uint32)
// Atomically: *addr &= v. Fenced LL/SC read-modify-write.
TEXT ·And(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R4		// R4 = addr
	MOVW	val+8(FP), R5		// R5 = v
	DBAR				// full barrier before the LL/SC sequence
	LL	(R4), R6		// load-linked: R6 = *addr
	AND	R5, R6			// R6 &= v
	SC	R6, (R4)		// store-conditional: R6 = 1 on success, 0 if reservation lost
	BEQ	R6, -4(PC)		// SC failed: retry (re-enters at the DBAR)
	DBAR				// full barrier after the LL/SC sequence
	RET
   255  
// uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr)
// Plain load bracketed by DBAR full fences: sequentially-consistent
// atomic 32-bit load.
TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12
	MOVV	ptr+0(FP), R19		// R19 = ptr
	DBAR				// full barrier before the load
	MOVWU	0(R19), R19		// zero-extending 32-bit load
	DBAR				// full barrier after the load
	MOVW	R19, ret+8(FP)
	RET
   264  
// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr)
// Plain byte load bracketed by DBAR full fences.
TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
	MOVV	ptr+0(FP), R19		// R19 = ptr
	DBAR				// full barrier before the load
	MOVBU	0(R19), R19		// zero-extending byte load
	DBAR				// full barrier after the load
	MOVB	R19, ret+8(FP)
	RET
   273  
// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr)
// Plain 64-bit load bracketed by DBAR full fences.
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R19		// R19 = ptr
	DBAR				// full barrier before the load
	MOVV	0(R19), R19		// 64-bit load
	DBAR				// full barrier after the load
	MOVV	R19, ret+8(FP)
	RET
   282  
// void *runtime∕internal∕atomic·Loadp(void *volatile *ptr)
// Pointer load: identical to Load64 (pointers are 64 bits),
// bracketed by DBAR full fences.
TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-16
	MOVV	ptr+0(FP), R19		// R19 = ptr
	DBAR				// full barrier before the load
	MOVV	0(R19), R19		// load the pointer value
	DBAR				// full barrier after the load
	MOVV	R19, ret+8(FP)
	RET
   291  
// uint32 runtime∕internal∕atomic·LoadAcq(uint32 volatile* ptr)
// Acquire-ordered load; the fully-fenced Load is at least as strong.
TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-12
	JMP	atomic·Load(SB)
   295  
// uintptr ·LoadAcquintptr(uintptr volatile* ptr)
// Acquire-ordered uintptr load; the fully-fenced Load64 is at least
// as strong.
TEXT ·LoadAcquintptr(SB),NOSPLIT|NOFRAME,$0-16
	JMP     atomic·Load64(SB)
   299