github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/internal/atomic/asm_mips64x.s (about)

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips64 mips64le

#include "textflag.h"
     8  
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Frame $0-17: ptr (8) + old (4) + new (4) + bool ret (1).
// Registers: R1 = ptr, R2 = old, R5 = new,
// R3 = SC scratch (SC overwrites it with the success flag), R4 = loaded value.
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1
	MOVW	old+8(FP), R2
	MOVW	new+12(FP), R5
	SYNC				// full barrier before entering the LL/SC loop
cas_again:
	MOVV	R5, R3			// re-copy new each attempt; SC clobbers its source reg
	LL	(R1), R4		// load-linked *ptr
	BNE	R2, R4, cas_fail	// current value != old: fail without storing
	SC	R3, (R1)		// store-conditional; R3 = 1 on success, 0 on failure
	BEQ	R3, cas_again		// R3 == 0: SC lost the reservation, retry
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)
	SYNC				// barrier after the atomic op completes
	RET
cas_fail:
	MOVV	$0, R1
	JMP	-4(PC)			// back to the MOVB above: store 0, SYNC, RET
    34  
// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*val == *old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
//
// 64-bit variant of Cas, using LLV/SCV (doubleword load-linked /
// store-conditional). Frame $0-25: ptr (8) + old (8) + new (8) + bool ret (1).
// Registers: R1 = ptr, R2 = old, R5 = new, R3 = SC scratch, R4 = loaded value.
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1
	MOVV	old+8(FP), R2
	MOVV	new+16(FP), R5
	SYNC				// full barrier before entering the LL/SC loop
cas64_again:
	MOVV	R5, R3			// re-copy new each attempt; SCV clobbers its source reg
	LLV	(R1), R4		// 64-bit load-linked *ptr
	BNE	R2, R4, cas64_fail	// current value != old: fail without storing
	SCV	R3, (R1)		// 64-bit store-conditional; R3 = 1 on success, 0 on failure
	BEQ	R3, cas64_again		// R3 == 0: SCV lost the reservation, retry
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)
	SYNC				// barrier after the atomic op completes
	RET
cas64_fail:
	MOVV	$0, R1
	JMP	-4(PC)			// back to the MOVB above: store 0, SYNC, RET
    61  
// uintptr is 64 bits on mips64, so Casuintptr is exactly Cas64
// (same $0-25 frame layout).
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	JMP	·Cas64(SB)
    64  
// Release-ordered CAS. Implemented with the fully-barriered Cas,
// which is at least as strong as release ordering requires.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	JMP	·Cas(SB)
    67  
// uintptr is 64 bits on mips64, so Loaduintptr is exactly Load64.
TEXT ·Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    70  
// uint is 64 bits on mips64, so Loaduint is exactly Load64.
TEXT ·Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	·Load64(SB)
    73  
// uintptr is 64 bits on mips64, so Storeuintptr is exactly Store64.
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
    76  
// uintptr is 64 bits on mips64, so Xadduintptr is exactly Xadd64.
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    79  
// int64 and uint64 loads are the same machine operation: delegate to Load64.
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	JMP	·Load64(SB)
    82  
// int64 and uint64 adds are the same machine operation: delegate to Xadd64.
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	JMP	·Xadd64(SB)
    85  
// bool casp(void **val, void *old, void *new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else
//		return 0;
//
// Pointer CAS: pointers are 64 bits on mips64, so this is Cas64.
// (Any required write barrier is the caller's concern, per its name "Casp1".)
TEXT ·Casp1(SB), NOSPLIT, $0-25
	JMP runtime∕internal∕atomic·Cas64(SB)
    95  
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
//
// Registers: R2 = ptr, R3 = delta, R4 = new value / SC scratch,
// R1 = saved copy of the new value (SC destroys R4).
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	delta+8(FP), R3
	SYNC			// full barrier before the LL/SC loop
	LL	(R2), R1	// load-linked old value
	ADDU	R1, R3, R4	// R4 = old + delta
	MOVV	R4, R1		// keep the result; SC will overwrite R4
	SC	R4, (R2)	// store-conditional; R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)	// SC failed: retry from the LL
	MOVW	R1, ret+16(FP)
	SYNC			// barrier after the atomic op completes
	RET
   112  
// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically: *ptr += delta; return *ptr.
// 64-bit variant of Xadd using LLV/SCV and ADDVU.
// Registers: R2 = ptr, R3 = delta, R4 = new value / SC scratch,
// R1 = saved copy of the new value (SCV destroys R4).
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	delta+8(FP), R3
	SYNC			// full barrier before the LL/SC loop
	LLV	(R2), R1	// 64-bit load-linked old value
	ADDVU	R1, R3, R4	// R4 = old + delta
	MOVV	R4, R1		// keep the result; SCV will overwrite R4
	SCV	R4, (R2)	// store-conditional; R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)	// SCV failed: retry from the LLV
	MOVV	R1, ret+16(FP)
	SYNC			// barrier after the atomic op completes
	RET
   125  
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically: old = *ptr; *ptr = new; return old.
// Registers: R2 = ptr, R5 = new, R3 = SC scratch (re-copied from R5 each
// attempt because SC destroys it), R1 = old value returned.
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2
	MOVW	new+8(FP), R5

	SYNC			// full barrier before the LL/SC loop
	MOVV	R5, R3		// fresh copy of new for this attempt
	LL	(R2), R1	// load-linked old value
	SC	R3, (R2)	// store-conditional; R3 = 1 on success, 0 on failure
	BEQ	R3, -3(PC)	// SC failed: retry from the MOVV re-copy
	MOVW	R1, ret+16(FP)
	SYNC			// barrier after the atomic op completes
	RET
   138  
// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// Atomically: old = *ptr; *ptr = new; return old.
// 64-bit variant of Xchg using LLV/SCV.
// Registers: R2 = ptr, R5 = new, R3 = SC scratch (re-copied from R5 each
// attempt because SCV destroys it), R1 = old value returned.
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2
	MOVV	new+8(FP), R5

	SYNC			// full barrier before the LL/SC loop
	MOVV	R5, R3		// fresh copy of new for this attempt
	LLV	(R2), R1	// 64-bit load-linked old value
	SCV	R3, (R2)	// store-conditional; R3 = 1 on success, 0 on failure
	BEQ	R3, -3(PC)	// SCV failed: retry from the MOVV re-copy
	MOVV	R1, ret+16(FP)
	SYNC			// barrier after the atomic op completes
	RET
   151  
// uintptr is 64 bits on mips64, so Xchguintptr is exactly Xchg64.
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	JMP	·Xchg64(SB)
   154  
// Pointer store without a write barrier (per its name, "NoWB"; the caller
// is responsible for any barrier). Pointers are 64 bits: same as Store64.
TEXT ·StorepNoWB(SB), NOSPLIT, $0-16
	JMP	·Store64(SB)
   157  
// Release-ordered 32-bit store. Implemented with the fully-barriered Store,
// which is at least as strong as release ordering requires.
TEXT ·StoreRel(SB), NOSPLIT, $0-12
	JMP	·Store(SB)
   160  
// void Store(uint32 volatile *ptr, uint32 val)
// Atomic 32-bit store: a plain aligned MOVW bracketed by SYNC barriers
// on both sides for sequential consistency.
TEXT ·Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1
	MOVW	val+8(FP), R2
	SYNC			// order prior accesses before the store
	MOVW	R2, 0(R1)
	SYNC			// order the store before subsequent accesses
	RET
   168  
// void Store64(uint64 volatile *ptr, uint64 val)
// Atomic 64-bit store: a plain aligned MOVV bracketed by SYNC barriers
// on both sides for sequential consistency.
TEXT ·Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1
	MOVV	val+8(FP), R2
	SYNC			// order prior accesses before the store
	MOVV	R2, 0(R1)
	SYNC			// order the store before subsequent accesses
	RET
   176  
// void	Or8(byte volatile*, byte);
// Atomically: *ptr |= val. LL/SC operates on 32-bit words, so the byte's
// enclosing aligned word is loaded, OR-ed with the shifted value, and
// stored back; the other three bytes are unchanged (OR with zero bits).
// Registers: R1 = original ptr (byte-lane selector), R2 = shifted val,
// R3 = word-aligned ptr, R4 = shift amount, then LL/SC scratch.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3		// R3 = ptr &^ 3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC			// full barrier before the LL/SC loop
	LL	(R3), R4	// load-linked the enclosing word
	OR	R2, R4		// set the target byte's bits
	SC	R4, (R3)	// store-conditional; R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)	// SC failed: retry (from the SYNC above)
	SYNC			// barrier after the atomic op completes
	RET
   202  
// void	And8(byte volatile*, byte);
// Atomically: *ptr &= val. Like Or8, works on the byte's enclosing aligned
// 32-bit word. The AND mask is (val << shift) | ^(0xFF << shift): ones in
// every other byte lane so only the target byte is affected.
// Registers: R1 = original ptr (byte-lane selector), R2 = mask,
// R3 = word-aligned ptr, R5 = lane-mask scratch, R4 = shift, then LL/SC scratch.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1
	MOVBU	val+8(FP), R2
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3		// R3 = ptr &^ 3
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2
	SLLV	R4, R5
	NOR	R0, R5		// NOR with zero = NOT: R5 = ^(0xFF << R4)
	OR	R5, R2		// R2 = full AND mask for the word

	SYNC			// full barrier before the LL/SC loop
	LL	(R3), R4	// load-linked the enclosing word
	AND	R2, R4		// clear bits only in the target byte lane
	SC	R4, (R3)	// store-conditional; R4 = 1 on success, 0 on failure
	BEQ	R4, -4(PC)	// SC failed: retry (from the SYNC above)
	SYNC			// barrier after the atomic op completes
	RET