github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/asm_mipsx.s (about)

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build mips mipsle
     6  
     7  #include "textflag.h"
     8  
// bool Cas(int32 *ptr, int32 old, int32 new)
// Atomically:
//	if *ptr == old { *ptr = new; return true } else { return false }
// Implemented as a MIPS LL/SC loop. SC writes its success flag (1/0)
// back into the source register, which is exactly the boolean result.
// SYNC on both sides gives full-barrier (sequentially consistent) semantics.
     9  TEXT ·Cas(SB),NOSPLIT,$0-13
    10  	MOVW	ptr+0(FP), R1	// R1 = ptr
    11  	MOVW	old+4(FP), R2	// R2 = expected old value
    12  	MOVW	new+8(FP), R5	// R5 = replacement value
    13  	SYNC
try_cas:
    15  	MOVW	R5, R3	// refresh R3 every attempt: SC overwrites it with the flag
    16  	LL	(R1), R4	// R4 = *R1 (load-linked, starts reservation)
    17  	BNE	R2, R4, cas_fail	// current value differs from old: fail
    18  	SC	R3, (R1)	// *R1 = R3 (store-conditional); R3 = 1 on success, 0 on failure
    19  	BEQ	R3, try_cas	// lost the reservation: retry
    20  	SYNC
    21  	MOVB	R3, ret+12(FP)	// success: R3 == 1
    22  	RET
cas_fail:
    24  	MOVB	R0, ret+12(FP)	// failure: R0 is the hardwired zero register
    25  	RET
    26  
// void Store(uint32 *ptr, uint32 val)
// Plain 32-bit store bracketed by full barriers (SYNC) on both sides.
    27  TEXT ·Store(SB),NOSPLIT,$0-8
    28  	MOVW	ptr+0(FP), R1
    29  	MOVW	val+4(FP), R2
    30  	SYNC
    31  	MOVW	R2, 0(R1)	// *ptr = val
    32  	SYNC
    33  	RET
    34  
// void Store8(uint8 *ptr, uint8 val)
// Single-byte store bracketed by full barriers, mirroring Store above.
    35  TEXT ·Store8(SB),NOSPLIT,$0-5
    36  	MOVW	ptr+0(FP), R1
    37  	MOVB	val+4(FP), R2
    38  	SYNC
    39  	MOVB	R2, 0(R1)	// *ptr = val (one byte)
    40  	SYNC
    41  	RET
    42  
// uint32 Load(uint32 *ptr)
// Barriered 32-bit load: SYNC / load / SYNC, result at ret+4(FP).
    43  TEXT ·Load(SB),NOSPLIT,$0-8
    44  	MOVW	ptr+0(FP), R1
    45  	SYNC
    46  	MOVW	0(R1), R1	// R1 = *ptr (pointer no longer needed, reuse R1)
    47  	SYNC
    48  	MOVW	R1, ret+4(FP)
    49  	RET
    50  
// uint8 Load8(uint8 *ptr)
// Barriered single-byte load. MOVB sign-extends into R1, but only the
// low byte is written back to ret+4(FP), so the result byte is exact.
    51  TEXT ·Load8(SB),NOSPLIT,$0-5
    52  	MOVW	ptr+0(FP), R1
    53  	SYNC
    54  	MOVB	0(R1), R1	// R1 = *ptr (one byte)
    55  	SYNC
    56  	MOVB	R1, ret+4(FP)
    57  	RET
    58  
// uint32 Xadd(uint32 *ptr, int32 delta)
// Atomically adds delta to *ptr and returns the NEW value, via LL/SC.
    59  TEXT ·Xadd(SB),NOSPLIT,$0-12
    60  	MOVW	ptr+0(FP), R2
    61  	MOVW	delta+4(FP), R3
    62  	SYNC
try_xadd:
    64  	LL	(R2), R1	// R1 = *R2 (load-linked)
    65  	ADDU	R1, R3, R4	// R4 = old + delta
    66  	MOVW	R4, R1	// save the new value: SC clobbers R4 with its flag
    67  	SC	R4, (R2)	// *R2 = R4; R4 = 1 on success, 0 on failure
    68  	BEQ	R4, try_xadd	// SC failed: retry
    69  	SYNC
    70  	MOVW	R1, ret+8(FP)	// return the new value
    71  	RET
    72  
// uint32 Xchg(uint32 *ptr, uint32 new)
// Atomically stores new into *ptr and returns the OLD value, via LL/SC.
    73  TEXT ·Xchg(SB),NOSPLIT,$0-12
    74  	MOVW	ptr+0(FP), R2
    75  	MOVW	new+4(FP), R5
    76  	SYNC
try_xchg:
    78  	MOVW	R5, R3	// refresh R3 every attempt: SC overwrites it with the flag
    79  	LL	(R2), R1	// R1 = *R2 (old value, load-linked)
    80  	SC	R3, (R2)	// *R2 = R3; R3 = 1 on success, 0 on failure
    81  	BEQ	R3, try_xchg	// SC failed: retry
    82  	SYNC
    83  	MOVW	R1, ret+8(FP)	// return the old value
    84  	RET
    85  
// 32-bit target: uintptr and uint are 32 bits wide, so these entry
// points tail-jump straight to the 32-bit implementations above.
// CasRel (release ordering) reuses the fully-barriered Cas, which is
// strictly stronger than required.
    86  TEXT ·Casuintptr(SB),NOSPLIT,$0-13
    87  	JMP	·Cas(SB)
    88  
    89  TEXT ·CasRel(SB),NOSPLIT,$0-13
    90  	JMP	·Cas(SB)
    91  
    92  TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
    93  	JMP	·Load(SB)
    94  
    95  TEXT ·Loaduint(SB),NOSPLIT,$0-8
    96  	JMP	·Load(SB)
    98  TEXT ·Loadp(SB),NOSPLIT,$-0-8
    99  	JMP	·Load(SB)
   100  
// More 32-bit forwarding stubs. Notes:
//   - Loadint64/Xaddint64 jump to Load64/Xadd64, which are defined in a
//     separate file (not visible here).
//   - StorepNoWB is the pointer store with no write barrier at this level.
//   - StoreRel/StoreReluintptr (release ordering) reuse the fully-barriered
//     Store, which is strictly stronger than required.
   101  TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
   102  	JMP	·Store(SB)
   103  
   104  TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
   105  	JMP	·Xadd(SB)
   106  
   107  TEXT ·Loadint64(SB),NOSPLIT,$0-12
   108  	JMP	·Load64(SB)
   109  
   110  TEXT ·Xaddint64(SB),NOSPLIT,$0-20
   111  	JMP	·Xadd64(SB)
   112  
   113  TEXT ·Casp1(SB),NOSPLIT,$0-13
   114  	JMP	·Cas(SB)
   115  
   116  TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
   117  	JMP	·Xchg(SB)
   118  
   119  TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
   120  	JMP	·Store(SB)
   121  
   122  TEXT ·StoreRel(SB),NOSPLIT,$0-8
   123  	JMP	·Store(SB)
   124  
   125  TEXT ·StoreReluintptr(SB),NOSPLIT,$0-8
   126  	JMP	·Store(SB)
   127  
// Atomically: *ptr |= val.
// MIPS32 LL/SC work only on aligned 32-bit words, so the byte is OR-ed
// into its containing word: align ptr down to 4, compute the byte's bit
// offset within the word (endianness-dependent), shift val into place,
// then run a 32-bit LL/OR/SC loop. OR-ing zero bits leaves the other
// three bytes of the word unchanged.
   128  // void	Or8(byte volatile*, byte);
   129  TEXT ·Or8(SB),NOSPLIT,$0-5
   130  	MOVW	ptr+0(FP), R1
   131  	MOVBU	val+4(FP), R2	// zero-extended byte
   132  	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
   133  	AND	R1, R3	// R3 = address of the word containing ptr
   134  #ifdef GOARCH_mips
   135  	// Big endian.  ptr = ptr ^ 3
   136  	XOR	$3, R1
   137  #endif
   138  	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
   139  	SLL	$3, R4
   140  	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
   141  	SYNC
try_or8:
   143  	LL	(R3), R4	// R4 = *R3 (load-linked word)
   144  	OR	R2, R4	// set the requested bits
   145  	SC	R4, (R3)	// *R3 = R4; R4 = 1 on success, 0 on failure
   146  	BEQ	R4, try_or8	// SC failed: retry
   147  	SYNC
   148  	RET
   149  
// Atomically: *ptr &= val.
// Same word-based technique as Or8, but AND must keep the other three
// bytes intact, so they are masked with ones:
//   R2 = (val << shift) | ^(0xFF << shift)
// NOR against R0 (the zero register) computes bitwise NOT.
   150  // void	And8(byte volatile*, byte);
   151  TEXT ·And8(SB),NOSPLIT,$0-5
   152  	MOVW	ptr+0(FP), R1
   153  	MOVBU	val+4(FP), R2	// zero-extended byte
   154  	MOVW	$~3, R3	// align ptr down to the containing 32-bit word
   155  	AND	R1, R3	// R3 = word address
   156  #ifdef GOARCH_mips
   157  	// Big endian.  ptr = ptr ^ 3
   158  	XOR	$3, R1
   159  #endif
   160  	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
   161  	SLL	$3, R4
   162  	MOVW	$0xFF, R5
   163  	SLL	R4, R2	// R2 = val << shift
   164  	SLL	R4, R5	// R5 = 0xFF << shift
   165  	NOR	R0, R5	// R5 = ^(0xFF << shift)
   166  	OR	R5, R2	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
   167  	SYNC
try_and8:
   169  	LL	(R3), R4	// R4 = *R3 (load-linked word)
   170  	AND	R2, R4	// clear only bits within the target byte
   171  	SC	R4, (R3)	// *R3 = R4; R4 = 1 on success, 0 on failure
   172  	BEQ	R4, try_and8	// SC failed: retry
   173  	SYNC
   174  	RET
   175  
// Atomically: *addr |= v, with full barriers on both sides.
// Word-sized, so no sub-word masking is needed — a plain LL/OR/SC loop.
   176  // func Or(addr *uint32, v uint32)
   177  TEXT ·Or(SB), NOSPLIT, $0-8
   178  	MOVW	ptr+0(FP), R1
   179  	MOVW	val+4(FP), R2
   180  
   181  	SYNC
   182  	LL	(R1), R3	// R3 = *addr (load-linked)
   183  	OR	R2, R3
   184  	SC	R3, (R1)	// *addr = R3; R3 = 1 on success, 0 on failure
   185  	BEQ	R3, -4(PC)	// SC failed: branch back and redo the sequence
   186  	SYNC
   187  	RET
   188  
// Atomically: *addr &= v, with full barriers on both sides.
// Word-sized counterpart of And8 — no masking needed, plain LL/AND/SC loop.
   189  // func And(addr *uint32, v uint32)
   190  TEXT ·And(SB), NOSPLIT, $0-8
   191  	MOVW	ptr+0(FP), R1
   192  	MOVW	val+4(FP), R2
   193  
   194  	SYNC
   195  	LL	(R1), R3	// R3 = *addr (load-linked)
   196  	AND	R2, R3
   197  	SC	R3, (R1)	// *addr = R3; R3 = 1 on success, 0 on failure
   198  	BEQ	R3, -4(PC)	// SC failed: branch back and redo the sequence
   199  	SYNC
   200  	RET