github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/internal/atomic/asm_arm.s (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "textflag.h"
     6  
     7  // bool armcas(int32 *val, int32 old, int32 new)
     8  // Atomically:
     9  //	if(*val == old){
    10  //		*val = new;
    11  //		return 1;
    12  //	}else
    13  //		return 0;
    14  //
    15  // To implement runtime∕internal∕atomic·cas in sys_$GOOS_arm.s
    16  // using the native instructions, use:
    17  //
    18  //	TEXT runtime∕internal∕atomic·cas(SB),NOSPLIT,$0
    19  //		B	runtime∕internal∕atomic·armcas(SB)
    20  //
     21  TEXT runtime∕internal∕atomic·armcas(SB),NOSPLIT,$0-13
     22  	MOVW	ptr+0(FP), R1	// R1 = val
     23  	MOVW	old+4(FP), R2	// R2 = expected old value
     24  	MOVW	new+8(FP), R3	// R3 = value to store on success
     25  casl:
     26  	LDREX	(R1), R0	// load-exclusive: R0 = *val, set reservation on (R1)
     27  	CMP	R0, R2
     28  	BNE	casfail	// *val != old: CAS fails
     29  
     30  	MOVB	runtime·goarm(SB), R8	// goarm = ARM architecture version this binary targets
     31  	CMP	$7, R8
     32  	BLT	2(PC)	// DMB exists only on ARMv7; skip it on older cores
     33  	DMB	MB_ISHST	// store barrier: order prior stores before the exclusive store
     34  
     35  	STREX	R3, (R1), R0	// store-exclusive: R0 = 0 iff reservation still held
     36  	CMP	$0, R0
     37  	BNE	casl	// reservation lost (contention/interrupt): retry from the load
     38  	MOVW	$1, R0	// success: return value = 1
     39  
     40  	CMP	$7, R8
     41  	BLT	2(PC)	// skip barrier on pre-ARMv7
     42  	DMB	MB_ISH	// full barrier: make the swap visible before returning
     43  
     44  	MOVB	R0, ret+12(FP)	// return true
     45  	RET
     46  casfail:
     47  	MOVW	$0, R0
     48  	MOVB	R0, ret+12(FP)	// return false
     49  	RET
    50  
    51  // stubs
    52  
     53  TEXT runtime∕internal∕atomic·Loadp(SB),NOSPLIT|NOFRAME,$0-8
     54  	B runtime∕internal∕atomic·Load(SB)	// pointers are 32-bit on ARM: a pointer load is just Load
    55  
     56  TEXT runtime∕internal∕atomic·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
     57  	B runtime∕internal∕atomic·Load(SB)	// Load's ordering is at least acquire on this port
    58  
     59  TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13
     60  	B	runtime∕internal∕atomic·Cas(SB)	// uintptr is 32-bit on ARM: same as Cas
    61  
     62  TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0-13
     63  	B	runtime∕internal∕atomic·Cas(SB)	// pointer CAS without write barrier: plain 32-bit Cas
    64  
     65  TEXT runtime∕internal∕atomic·CasRel(SB),NOSPLIT,$0-13
     66  	B	runtime∕internal∕atomic·Cas(SB)	// Cas's ordering is at least release on this port
    67  
     68  TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8
     69  	B	runtime∕internal∕atomic·Load(SB)	// uintptr is 32-bit on ARM
    70  
     71  TEXT runtime∕internal∕atomic·Loaduint(SB),NOSPLIT,$0-8
     72  	B	runtime∕internal∕atomic·Load(SB)	// uint is 32-bit on ARM
    73  
     74  TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
     75  	B	runtime∕internal∕atomic·Store(SB)	// uintptr is 32-bit on ARM
    76  
     77  TEXT runtime∕internal∕atomic·StorepNoWB(SB),NOSPLIT,$0-8
     78  	B	runtime∕internal∕atomic·Store(SB)	// pointer store, no GC write barrier: plain Store
    79  
     80  TEXT runtime∕internal∕atomic·StoreRel(SB),NOSPLIT,$0-8
     81  	B	runtime∕internal∕atomic·Store(SB)	// Store's ordering is at least release on this port
    82  
     83  TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12
     84  	B	runtime∕internal∕atomic·Xadd(SB)	// uintptr is 32-bit on ARM
    85  
     86  TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-12
     87  	B	runtime∕internal∕atomic·Load64(SB)	// int64 and uint64 share one atomic load
    88  
     89  TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-20
     90  	B	runtime∕internal∕atomic·Xadd64(SB)	// int64 and uint64 share one atomic add
    91  
    92  // 64-bit atomics
    93  // The native ARM implementations use LDREXD/STREXD, which are
    94  // available on ARMv6k or later. We use them only on ARMv7.
    95  // On older ARM, we use Go implementations which simulate 64-bit
    96  // atomics with locks.
    97  
     98  TEXT	armCas64<>(SB),NOSPLIT,$0-21	// bool Cas64(addr *uint64, old, new uint64) — ARMv7 only
     99  	MOVW	addr+0(FP), R1
    100  	// make unaligned atomic access panic
    101  	AND.S	$7, R1, R2	// LDREXD/STREXD require 8-byte alignment
    102  	BEQ 	2(PC)
    103  	MOVW	R2, (R2)	// crash. AND.S above left only low 3 bits in R2.
    104  	MOVW	old_lo+4(FP), R2
    105  	MOVW	old_hi+8(FP), R3
    106  	MOVW	new_lo+12(FP), R4
    107  	MOVW	new_hi+16(FP), R5
    108  cas64loop:
    109  	LDREXD	(R1), R6	// loads R6 and R7
    110  	CMP	R2, R6	// compare low words
    111  	BNE	cas64fail
    112  	CMP	R3, R7	// compare high words
    113  	BNE	cas64fail
    114  
    115  	DMB	MB_ISHST	// store barrier before publishing the new value
    116  
    117  	STREXD	R4, (R1), R0	// stores R4 and R5
    118  	CMP	$0, R0
    119  	BNE	cas64loop	// reservation lost: retry
    120  	MOVW	$1, R0	// success
    121  
    122  	DMB	MB_ISH	// full barrier before returning
    123  
    124  	MOVBU	R0, swapped+20(FP)	// return true
    125  	RET
    126  cas64fail:
    127  	MOVW	$0, R0
    128  	MOVBU	R0, swapped+20(FP)	// return false
    129  	RET
   130  
    131  TEXT	armXadd64<>(SB),NOSPLIT,$0-20	// uint64 Xadd64(addr *uint64, delta int64) — returns new value; ARMv7 only
    132  	MOVW	addr+0(FP), R1
    133  	// make unaligned atomic access panic
    134  	AND.S	$7, R1, R2	// LDREXD/STREXD require 8-byte alignment
    135  	BEQ 	2(PC)
    136  	MOVW	R2, (R2)	// crash. AND.S above left only low 3 bits in R2.
    137  	MOVW	delta_lo+4(FP), R2
    138  	MOVW	delta_hi+8(FP), R3
    139  
    140  add64loop:
    141  	LDREXD	(R1), R4	// loads R4 and R5
    142  	ADD.S	R2, R4	// low words; .S sets carry for the high half
    143  	ADC	R3, R5	// high words plus carry
    144  
    145  	DMB	MB_ISHST	// store barrier before publishing the sum
    146  
    147  	STREXD	R4, (R1), R0	// stores R4 and R5
    148  	CMP	$0, R0
    149  	BNE	add64loop	// reservation lost: retry
    150  
    151  	DMB	MB_ISH	// full barrier before returning
    152  
    153  	MOVW	R4, new_lo+12(FP)	// return the updated value
    154  	MOVW	R5, new_hi+16(FP)
    155  	RET
   156  
    157  TEXT	armXchg64<>(SB),NOSPLIT,$0-20	// uint64 Xchg64(addr *uint64, new uint64) — returns old value; ARMv7 only
    158  	MOVW	addr+0(FP), R1
    159  	// make unaligned atomic access panic
    160  	AND.S	$7, R1, R2	// LDREXD/STREXD require 8-byte alignment
    161  	BEQ 	2(PC)
    162  	MOVW	R2, (R2)	// crash. AND.S above left only low 3 bits in R2.
    163  	MOVW	new_lo+4(FP), R2
    164  	MOVW	new_hi+8(FP), R3
    165  
    166  swap64loop:
    167  	LDREXD	(R1), R4	// loads R4 and R5
    168  
    169  	DMB	MB_ISHST	// store barrier before publishing the new value
    170  
    171  	STREXD	R2, (R1), R0	// stores R2 and R3
    172  	CMP	$0, R0
    173  	BNE	swap64loop	// reservation lost: retry
    174  
    175  	DMB	MB_ISH	// full barrier before returning
    176  
    177  	MOVW	R4, old_lo+12(FP)	// return the previous value
    178  	MOVW	R5, old_hi+16(FP)
    179  	RET
   180  
    181  TEXT	armLoad64<>(SB),NOSPLIT,$0-12	// uint64 Load64(addr *uint64) — ARMv7 only
    182  	MOVW	addr+0(FP), R1
    183  	// make unaligned atomic access panic
    184  	AND.S	$7, R1, R2	// LDREXD requires 8-byte alignment
    185  	BEQ 	2(PC)
    186  	MOVW	R2, (R2)	// crash. AND.S above left only low 3 bits in R2.
    187  
    188  	LDREXD	(R1), R2	// loads R2 and R3; single exclusive load reads the pair atomically
    189  	DMB	MB_ISH	// acquire-style barrier before returning the value
    190  
    191  	MOVW	R2, val_lo+4(FP)
    192  	MOVW	R3, val_hi+8(FP)
    193  	RET
   194  
    195  TEXT	armStore64<>(SB),NOSPLIT,$0-12	// void Store64(addr *uint64, val uint64) — ARMv7 only
    196  	MOVW	addr+0(FP), R1
    197  	// make unaligned atomic access panic
    198  	AND.S	$7, R1, R2	// LDREXD/STREXD require 8-byte alignment
    199  	BEQ 	2(PC)
    200  	MOVW	R2, (R2)	// crash. AND.S above left only low 3 bits in R2.
    201  	MOVW	val_lo+4(FP), R2
    202  	MOVW	val_hi+8(FP), R3
    203  
    204  store64loop:
    205  	LDREXD	(R1), R4	// loads R4 and R5; load-exclusive only to arm the reservation
    206  
    207  	DMB	MB_ISHST	// store barrier before publishing the new value
    208  
    209  	STREXD	R2, (R1), R0	// stores R2 and R3
    210  	CMP	$0, R0
    211  	BNE	store64loop	// reservation lost: retry
    212  
    213  	DMB	MB_ISH	// full barrier before returning
    214  	RET
   215  
    216  TEXT	·Cas64(SB),NOSPLIT,$0-21
    217  	MOVB	runtime·goarm(SB), R11	// dispatch on the targeted ARM version
    218  	CMP	$7, R11
    219  	BLT	2(PC)	// goarm < 7: skip the native path
    220  	JMP	armCas64<>(SB)	// ARMv7: LDREXD/STREXD implementation
    221  	JMP	·goCas64(SB)	// pre-v7: lock-based Go implementation
   222  
    223  TEXT	·Xadd64(SB),NOSPLIT,$0-20
    224  	MOVB	runtime·goarm(SB), R11	// dispatch on the targeted ARM version
    225  	CMP	$7, R11
    226  	BLT	2(PC)	// goarm < 7: skip the native path
    227  	JMP	armXadd64<>(SB)	// ARMv7: LDREXD/STREXD implementation
    228  	JMP	·goXadd64(SB)	// pre-v7: lock-based Go implementation
   229  
    230  TEXT	·Xchg64(SB),NOSPLIT,$0-20
    231  	MOVB	runtime·goarm(SB), R11	// dispatch on the targeted ARM version
    232  	CMP	$7, R11
    233  	BLT	2(PC)	// goarm < 7: skip the native path
    234  	JMP	armXchg64<>(SB)	// ARMv7: LDREXD/STREXD implementation
    235  	JMP	·goXchg64(SB)	// pre-v7: lock-based Go implementation
   236  
    237  TEXT	·Load64(SB),NOSPLIT,$0-12
    238  	MOVB	runtime·goarm(SB), R11	// dispatch on the targeted ARM version
    239  	CMP	$7, R11
    240  	BLT	2(PC)	// goarm < 7: skip the native path
    241  	JMP	armLoad64<>(SB)	// ARMv7: LDREXD implementation
    242  	JMP	·goLoad64(SB)	// pre-v7: lock-based Go implementation
   243  
    244  TEXT	·Store64(SB),NOSPLIT,$0-12
    245  	MOVB	runtime·goarm(SB), R11	// dispatch on the targeted ARM version
    246  	CMP	$7, R11
    247  	BLT	2(PC)	// goarm < 7: skip the native path
    248  	JMP	armStore64<>(SB)	// ARMv7: LDREXD/STREXD implementation
    249  	JMP	·goStore64(SB)	// pre-v7: lock-based Go implementation