github.com/c12o16h1/go/src@v0.0.0-20200114212001-5a151c0f00ed/runtime/internal/atomic/asm_mips64x.s (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build mips64 mips64le
     6  
     7  #include "textflag.h"
     8  
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVW	old+8(FP), R2	// R2 = expected value
	MOVW	new+12(FP), R5	// R5 = replacement value
	SYNC			// full barrier before the LL/SC sequence
cas_again:
	MOVV	R5, R3		// SC overwrites its source reg, so work on a copy of new
	LL	(R1), R4	// load-linked: R4 = *ptr, start reservation
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// store-conditional: R3 = 1 on success, 0 if reservation lost
	BEQ	R3, cas_again	// reservation lost: retry the whole sequence
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)
	SYNC			// full barrier after the successful update
	RET
cas_fail:
	MOVV	$0, R1
	JMP	-4(PC)		// back to the MOVB above: store 0, SYNC, RET
    34  
// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVV	old+8(FP), R2	// R2 = expected value
	MOVV	new+16(FP), R5	// R5 = replacement value
	SYNC			// full barrier before the LL/SC sequence
cas64_again:
	MOVV	R5, R3		// SC overwrites its source reg, so work on a copy of new
	LLV	(R1), R4	// 64-bit load-linked: R4 = *ptr, start reservation
	BNE	R2, R4, cas64_fail
	SCV	R3, (R1)	// 64-bit store-conditional: R3 = 1 on success, 0 otherwise
	BEQ	R3, cas64_again	// reservation lost: retry the whole sequence
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)
	SYNC			// full barrier after the successful update
	RET
cas64_fail:
	MOVV	$0, R1
	JMP	-4(PC)		// back to the MOVB above: store 0, SYNC, RET
    61  
// Casuintptr = Cas64: uintptr is 64 bits on mips64.
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)
    64  
// CasRel (release-ordered CAS) = Cas: plain Cas already brackets the
// LL/SC sequence with SYNC barriers, which covers release ordering.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)
    67  
// Loaduintptr = Load64: uintptr is 64 bits on mips64.
TEXT ·Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    70  
// Loaduint = Load64: uint is 64 bits on mips64.
TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    73  
// Storeuintptr = Store64: uintptr is 64 bits on mips64.
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
    76  
// Xadduintptr = Xadd64: uintptr is 64 bits on mips64.
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    79  
// Loadint64 = Load64: same bit pattern, signedness is irrelevant to the load.
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)
    82  
// Xaddint64 = Xadd64: two's-complement addition is the same for signed/unsigned.
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    85  
// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
// Pointers are 64 bits on mips64, so delegate to Cas64.
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP runtime∕internal∕atomic·Cas64(SB)
    95  
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2	// R2 = ptr
	MOVW	delta+8(FP), R3	// R3 = delta
	SYNC			// full barrier before the LL/SC sequence
	LL	(R2), R1	// load-linked: R1 = old value
	ADDU	R1, R3, R4	// R4 = old + delta
	MOVV	R4, R1		// keep the new value; SC overwrites R4 with success flag
	SC	R4, (R2)	// store-conditional: R4 = 1 on success, 0 otherwise
	BEQ	R4, -4(PC)	// reservation lost: retry from the LL
	MOVW	R1, ret+16(FP)	// return the new value
	SYNC			// full barrier after the update
	RET
   112  
// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2	// R2 = ptr
	MOVV	delta+8(FP), R3	// R3 = delta
	SYNC			// full barrier before the LL/SC sequence
	LLV	(R2), R1	// 64-bit load-linked: R1 = old value
	ADDVU	R1, R3, R4	// R4 = old + delta
	MOVV	R4, R1		// keep the new value; SCV overwrites R4 with success flag
	SCV	R4, (R2)	// 64-bit store-conditional: R4 = 1 on success, 0 otherwise
	BEQ	R4, -4(PC)	// reservation lost: retry from the LLV
	MOVV	R1, ret+16(FP)	// return the new value
	SYNC			// full barrier after the update
	RET
   125  
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr; *ptr = new; return old;
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2	// R2 = ptr
	MOVW	new+8(FP), R5	// R5 = new value

	SYNC			// full barrier before the LL/SC sequence
	MOVV	R5, R3		// SC overwrites its source reg; re-copy new each attempt
	LL	(R2), R1	// load-linked: R1 = old value
	SC	R3, (R2)	// store-conditional: R3 = 1 on success, 0 otherwise
	BEQ	R3, -3(PC)	// reservation lost: retry from the MOVV copy
	MOVW	R1, ret+16(FP)	// return the old value
	SYNC			// full barrier after the update
	RET
   138  
// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// Atomically:
//	old = *ptr; *ptr = new; return old;
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2	// R2 = ptr
	MOVV	new+8(FP), R5	// R5 = new value

	SYNC			// full barrier before the LL/SC sequence
	MOVV	R5, R3		// SCV overwrites its source reg; re-copy new each attempt
	LLV	(R2), R1	// 64-bit load-linked: R1 = old value
	SCV	R3, (R2)	// 64-bit store-conditional: R3 = 1 on success, 0 otherwise
	BEQ	R3, -3(PC)	// reservation lost: retry from the MOVV copy
	MOVV	R1, ret+16(FP)	// return the old value
	SYNC			// full barrier after the update
	RET
   151  
// Xchguintptr = Xchg64: uintptr is 64 bits on mips64.
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)
   154  
// StorepNoWB (pointer store without write barrier) = Store64:
// pointers are 64 bits on mips64.
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
   157  
// StoreRel (release-ordered store) = Store: plain Store already brackets
// the store with SYNC barriers, which covers release ordering.
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)
   160  
// void Store(uint32 volatile *ptr, uint32 val)
// Sequentially-consistent 32-bit store: SYNC barriers on both sides
// of a plain aligned store.
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVW	val+8(FP), R2	// R2 = val
	SYNC			// order prior accesses before the store
	MOVW	R2, 0(R1)	// *ptr = val (aligned 32-bit store is atomic)
	SYNC			// order the store before subsequent accesses
	RET
   168  
// void Store8(byte volatile *ptr, byte val)
// Sequentially-consistent 8-bit store: SYNC barriers on both sides
// of a plain byte store.
TEXT ·Store8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVB	val+8(FP), R2	// R2 = val
	SYNC			// order prior accesses before the store
	MOVB	R2, 0(R1)	// *ptr = val
	SYNC			// order the store before subsequent accesses
	RET
   176  
// void Store64(uint64 volatile *ptr, uint64 val)
// Sequentially-consistent 64-bit store: SYNC barriers on both sides
// of a plain aligned store.
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVV	val+8(FP), R2	// R2 = val
	SYNC			// order prior accesses before the store
	MOVV	R2, 0(R1)	// *ptr = val (aligned 64-bit store is atomic)
	SYNC			// order the store before subsequent accesses
	RET
   184  
// void	Or8(byte volatile*, byte);
// Atomically: *ptr |= val. MIPS has no byte-wide LL/SC, so operate on the
// containing 32-bit word with the value shifted into the byte's position.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVBU	val+8(FP), R2	// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3		// R3 = ptr &^ 3, the word address for LL/SC
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1		// mirror the byte index within the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4	// byte offset within the word
	SLLV	$3, R4		// * 8 -> bit offset
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC			// full barrier before the LL/SC sequence
	LL	(R3), R4	// load-linked the containing word
	OR	R2, R4		// set the target byte's bits; other bytes unchanged
	SC	R4, (R3)	// store-conditional: R4 = 1 on success, 0 otherwise
	BEQ	R4, -4(PC)	// reservation lost: retry from the LL
	SYNC			// full barrier after the update
	RET
   210  
// void	And8(byte volatile*, byte);
// Atomically: *ptr &= val. MIPS has no byte-wide LL/SC, so operate on the
// containing 32-bit word with a mask that leaves the other bytes untouched.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1	// R1 = ptr
	MOVBU	val+8(FP), R2	// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3		// R3 = ptr &^ 3, the word address for LL/SC
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1		// mirror the byte index within the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4	// byte offset within the word
	SLLV	$3, R4		// * 8 -> bit offset
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2		// val shifted into position
	SLLV	R4, R5		// 0xFF mask shifted into position
	NOR	R0, R5		// R5 = ^R5 (NOR with zero): 1s everywhere except target byte
	OR	R5, R2		// R2 = shifted val with all other bytes' bits set

	SYNC			// full barrier before the LL/SC sequence
	LL	(R3), R4	// load-linked the containing word
	AND	R2, R4		// and target byte with val; other bytes unchanged
	SC	R4, (R3)	// store-conditional: R4 = 1 on success, 0 otherwise
	BEQ	R4, -4(PC)	// reservation lost: retry from the LL
	SYNC			// full barrier after the update
	RET