github.com/hongwozai/go-src-1.4.3@v0.0.0-20191127132709-dc3fce3dbccb/src/sync/atomic/asm_arm.s

// Copyright 2011 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !race

#include "textflag.h"

// ARM atomic operations, for use by asm_$(GOOS)_arm.s.

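// Every routine below is built from the same load-linked/store-conditional
// retry loop. A minimal Go-level sketch of that shape, written against the
// package's own CAS rather than raw LDREX/STREX (illustrative only;
// atomicUpdate is a made-up name, and `import "sync/atomic"` is assumed):
//
//	func atomicUpdate(addr *uint32, f func(uint32) uint32) uint32 {
//		for {
//			old := atomic.LoadUint32(addr) // plays the role of LDREX
//			new := f(old)
//			if atomic.CompareAndSwapUint32(addr, old, new) { // STREX succeeded
//				return new
//			}
//			// another writer slipped in between load and store; retry
//		}
//	}
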
TEXT ·armCompareAndSwapUint32(SB),NOSPLIT,$0-13
	MOVW	addr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R3
casloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R0
	CMP	R0, R2
	BNE	casfail
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	casloop
	MOVW	$1, R0
	MOVBU	R0, ret+12(FP)
	RET
casfail:
	MOVW	$0, R0
	MOVBU	R0, ret+12(FP)
	RET

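// A usage sketch of the Go function this backs,
// CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool).
// Illustrative only; saturatingInc is a made-up name and
// `import "sync/atomic"` is assumed:
//
//	// saturatingInc atomically increments *addr, but never past limit.
//	func saturatingInc(addr *uint32, limit uint32) bool {
//		for {
//			old := atomic.LoadUint32(addr)
//			if old == limit {
//				return false
//			}
//			if atomic.CompareAndSwapUint32(addr, old, old+1) {
//				return true
//			}
//		}
//	}
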
TEXT ·armCompareAndSwapUint64(SB),NOSPLIT,$0-21
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	oldlo+4(FP), R2
	MOVW	oldhi+8(FP), R3
	MOVW	newlo+12(FP), R4
	MOVW	newhi+16(FP), R5
cas64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R6	// loads R6 and R7
	CMP	R2, R6
	BNE	cas64fail
	CMP	R3, R7
	BNE	cas64fail
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	cas64loop
	MOVW	$1, R0
	MOVBU	R0, ret+20(FP)
	RET
cas64fail:
	MOVW	$0, R0
	MOVBU	R0, ret+20(FP)
	RET

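// The AND.S/MOVW pair above faults deliberately: on ARM it is the
// caller's responsibility to 64-bit-align 64-bit words accessed
// atomically. A sketch of the documented way to arrange that — the first
// word in an allocated struct can be relied upon to be 64-bit aligned
// (illustrative; counter and bumpIfUnchanged are made-up names):
//
//	type counter struct {
//		n    uint64 // must stay the first field for alignment on ARM
//		name string
//	}
//
//	func bumpIfUnchanged(c *counter, old uint64) bool {
//		return atomic.CompareAndSwapUint64(&c.n, old, old+1)
//	}
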
TEXT ·armAddUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	delta+4(FP), R2
addloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	ADD	R2, R3
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	addloop
	MOVW	R3, ret+8(FP)
	RET

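// Note that this returns the new value (R3 after the ADD), not the old
// one. There is no separate subtract entry point; as the sync/atomic
// docs describe, subtraction is spelled with two's complement. A sketch
// (x and delta are made-up uint32 variables; "sync/atomic" assumed):
//
//	next := atomic.AddUint32(&x, 1)        // x's updated value
//	atomic.AddUint32(&x, ^uint32(0))       // decrement x by 1
//	atomic.AddUint32(&x, ^uint32(delta-1)) // subtract delta from x
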
TEXT ·armAddUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	deltalo+4(FP), R2
	MOVW	deltahi+8(FP), R3
add64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	ADD.S	R2, R4
	ADC	R3, R5
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	add64loop
	MOVW	R4, retlo+12(FP)
	MOVW	R5, rethi+16(FP)
	RET

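// ADD.S sets the carry flag from the low-word addition and ADC folds it
// into the high word, forming one 64-bit add out of two 32-bit ones. A
// Go model of that carry chain (illustrative; add64viaHalves is a
// made-up name):
//
//	func add64viaHalves(oldlo, oldhi, deltalo, deltahi uint32) (lo, hi uint32) {
//		lo = oldlo + deltalo
//		var carry uint32
//		if lo < oldlo { // the low-word add wrapped around
//			carry = 1
//		}
//		hi = oldhi + deltahi + carry
//		return lo, hi
//	}
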
TEXT ·armSwapUint32(SB),NOSPLIT,$0-12
	MOVW	addr+0(FP), R1
	MOVW	new+4(FP), R2
swaploop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	STREX	R2, (R1), R0
	CMP	$0, R0
	BNE	swaploop
	MOVW	R3, old+8(FP)
	RET

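// Swap stores new unconditionally and hands back the previous value, so
// a zero return tells the caller it got there first. A usage sketch
// (claimOnce is a made-up name; "sync/atomic" assumed imported):
//
//	func claimOnce(flag *uint32) bool {
//		return atomic.SwapUint32(flag, 1) == 0
//	}
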
TEXT ·armSwapUint64(SB),NOSPLIT,$0-20
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	newlo+4(FP), R2
	MOVW	newhi+8(FP), R3
swap64loop:
	// LDREXD and STREXD were introduced in ARMv6k.
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	swap64loop
	MOVW	R4, oldlo+12(FP)
	MOVW	R5, oldhi+16(FP)
	RET

TEXT ·armLoadUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
load64loop:
	LDREXD	(R1), R2	// loads R2 and R3
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	load64loop
	MOVW	R2, vallo+4(FP)
	MOVW	R3, valhi+8(FP)
	RET

TEXT ·armStoreUint64(SB),NOSPLIT,$0-12
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	vallo+4(FP), R2
	MOVW	valhi+8(FP), R3
store64loop:
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	store64loop
	RET

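// Even plain 64-bit loads and stores go through the exclusive pair,
// because a simple LDRD/STRD is not guaranteed to be single-copy atomic
// here: the load writes back the value it just read, and the store
// loops, until the monitor confirms an undisturbed 8-byte access. A
// usage sketch (publish/observe are made-up names; a package-level
// uint64 can be relied upon to be 64-bit aligned):
//
//	var ts uint64
//
//	func publish(now uint64) { atomic.StoreUint64(&ts, now) }
//	func observe() uint64    { return atomic.LoadUint64(&ts) }
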
// Check for broken 64-bit LDREXD as found in QEMU.
// LDREXD followed by immediate STREXD should succeed.
// If it fails, try a few times just to be sure (maybe our thread got
// rescheduled between the two instructions) and then panic.
// A bug in some copies of QEMU makes STREXD never succeed,
// which will make uses of the 64-bit atomic operations loop forever.
// If things are working, set ok64<> to avoid future checks.
// https://bugs.launchpad.net/qemu/+bug/670883.
TEXT	check64<>(SB),NOSPLIT,$16-0
	MOVW	$10, R1
	// 8-aligned stack address scratch space.
	MOVW	$8(R13), R5
	AND	$~7, R5
loop:
	LDREXD	(R5), R2
	STREXD	R2, (R5), R0
	CMP	$0, R0
	BEQ	ok
	SUB	$1, R1
	CMP	$0, R1
	BNE	loop
	// Must be buggy QEMU.
	BL	·panic64(SB)
ok:
	RET

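// A Go-level model of the probe above (illustrative; probe is a made-up
// stand-in for one LDREXD followed immediately by STREXD):
//
//	func check64(probe func() bool) {
//		for tries := 10; tries > 0; tries-- {
//			if probe() {
//				return // the exclusive pair works; all good
//			}
//			// perhaps rescheduled between the two instructions; try again
//		}
//		panic("broken 64-bit atomic operations (buggy QEMU?)")
//	}
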
// Fast, cached version of check64<>.  No frame, just MOVW CMP RET after first time.
TEXT	fastCheck64<>(SB),NOSPLIT,$-4
	MOVW	ok64<>(SB), R0
	CMP	$0, R0	// have we been here before?
	RET.NE
	B	slowCheck64<>(SB)

TEXT slowCheck64<>(SB),NOSPLIT,$0-0
	BL	check64<>(SB)
	// Still here, must be okay.
	MOVW	$1, R0
	MOVW	R0, ok64<>(SB)
	RET

GLOBL ok64<>(SB), NOPTR, $4
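
// fastCheck64, slowCheck64, and ok64<> together form a hand-rolled
// "check once, then cache the verdict" gate; the fast path stays
// frame-free so it costs only a load, a compare, and a return. A Go
// model of the same shape (illustrative; checked and runCheck are
// made-up names, and the real code uses a plain word rather than
// sync.Once to avoid a frame):
//
//	var checked uint32
//
//	func fastCheck(runCheck func()) {
//		if atomic.LoadUint32(&checked) != 0 {
//			return // been here before
//		}
//		runCheck()                      // panics if the hardware is broken
//		atomic.StoreUint32(&checked, 1) // still here, must be okay
//	}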