github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/sync/atomic/asm_arm.s

// Copyright 2011 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !race

// ARM atomic operations, for use by asm_$(GOOS)_arm.s.

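// The exported functions in asm_$(GOOS)_arm.s reach these routines with a
// tail branch once the OS-specific file decides the LDREX/STREX path should
// be used.  A minimal sketch of such a wrapper (illustrative only, not the
// verbatim contents of any particular port):
//
//	TEXT ·CompareAndSwapUint32(SB),7,$0
//		B	·armCompareAndSwapUint32(SB)

// armCompareAndSwapUint32 implements the Go-level signature
//	func(addr *uint32, old, new uint32) (swapped bool)
// addr is at +0(FP), old at +4(FP), new at +8(FP); the bool result is
// written as a single byte at ret+12(FP).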
TEXT ·armCompareAndSwapUint32(SB),7,$0
	MOVW	addr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R3
casloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R0
	CMP	R0, R2
	BNE	casfail
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	casloop
	MOVW	$1, R0
	MOVBU	R0, ret+12(FP)
	RET
casfail:
	MOVW	$0, R0
	MOVBU	R0, ret+12(FP)
	RET

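// armCompareAndSwapUint64 takes addr at +0(FP), the expected value as a
// (low, high) word pair at +4(FP)/+8(FP), the replacement pair at
// +12(FP)/+16(FP), and writes the bool result to ret+20(FP).  addr must be
// 8-aligned; if it is not, the AND.S/MOVW pair below stores to the small
// nonzero remainder left in R2, an unmapped address, so the misuse faults
// instead of silently corrupting memory.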
TEXT ·armCompareAndSwapUint64(SB),7,$0
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	oldlo+4(FP), R2
	MOVW	oldhi+8(FP), R3
	MOVW	newlo+12(FP), R4
	MOVW	newhi+16(FP), R5
cas64loop:
	// LDREXD and STREXD were introduced in ARMv6K (ARM11).
	LDREXD	(R1), R6	// loads R6 and R7
	CMP	R2, R6
	BNE	cas64fail
	CMP	R3, R7
	BNE	cas64fail
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	cas64loop
	MOVW	$1, R0
	MOVBU	R0, ret+20(FP)
	RET
cas64fail:
	MOVW	$0, R0
	MOVBU	R0, ret+20(FP)
	RET

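// armAddUint32 atomically adds delta+4(FP) to *addr and returns the new
// value at ret+8(FP), retrying the LDREX/STREX pair until the exclusive
// store succeeds.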
TEXT ·armAddUint32(SB),7,$0
	MOVW	addr+0(FP), R1
	MOVW	delta+4(FP), R2
addloop:
	// LDREX and STREX were introduced in ARMv6.
	LDREX	(R1), R3
	ADD	R2, R3
	STREX	R3, (R1), R0
	CMP	$0, R0
	BNE	addloop
	MOVW	R3, ret+8(FP)
	RET

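// armAddUint64 is the 64-bit counterpart: delta arrives as a (low, high)
// word pair, the addition is carried across the halves with ADD.S (sets the
// carry flag) and ADC (add with carry), and the new value is returned in
// retlo+12(FP)/rethi+16(FP).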
TEXT ·armAddUint64(SB),7,$0
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	deltalo+4(FP), R2
	MOVW	deltahi+8(FP), R3
add64loop:
	// LDREXD and STREXD were introduced in ARMv6K (ARM11).
	LDREXD	(R1), R4	// loads R4 and R5
	ADD.S	R2, R4
	ADC	R3, R5
	STREXD	R4, (R1), R0	// stores R4 and R5
	CMP	$0, R0
	BNE	add64loop
	MOVW	R4, retlo+12(FP)
	MOVW	R5, rethi+16(FP)
	RET

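// armLoadUint64 performs an atomic 64-bit load.  The value read by LDREXD is
// written straight back with STREXD; the loop exits only when that exclusive
// store succeeds, which confirms no other write intervened and so the two
// words were observed as a single unit.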
TEXT ·armLoadUint64(SB),7,$0
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
load64loop:
	LDREXD	(R1), R2	// loads R2 and R3
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	load64loop
	MOVW	R2, vallo+4(FP)
	MOVW	R3, valhi+8(FP)
	RET

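// armStoreUint64 performs an atomic 64-bit store.  The LDREXD exists only to
// claim exclusive access to the doubleword (its result in R4/R5 is unused);
// the loop retries until the STREXD of the new (R2, R3) pair succeeds.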
TEXT ·armStoreUint64(SB),7,$0
	BL	fastCheck64<>(SB)
	MOVW	addr+0(FP), R1
	// make unaligned atomic access panic
	AND.S	$7, R1, R2
	BEQ	2(PC)
	MOVW	R2, (R2)
	MOVW	vallo+4(FP), R2
	MOVW	valhi+8(FP), R3
store64loop:
	LDREXD	(R1), R4	// loads R4 and R5
	STREXD	R2, (R1), R0	// stores R2 and R3
	CMP	$0, R0
	BNE	store64loop
	RET

// Check for broken 64-bit LDREXD as found in QEMU.
// LDREXD followed by immediate STREXD should succeed.
// If it fails, try a few times just to be sure (maybe our thread got
// rescheduled between the two instructions) and then panic.
// A bug in some copies of QEMU makes STREXD never succeed,
// which will make uses of the 64-bit atomic operations loop forever.
// If things are working, set ok64<> to avoid future checks.
// https://bugs.launchpad.net/qemu/+bug/670883.
TEXT	check64<>(SB),7,$16
	MOVW	$10, R1
	// 8-aligned stack address scratch space.
	MOVW	$8(R13), R5
	AND	$~7, R5
loop:
	LDREXD	(R5), R2
	STREXD	R2, (R5), R0
	CMP	$0, R0
	BEQ	ok
	SUB	$1, R1
	CMP	$0, R1
	BNE	loop
	// Must be buggy QEMU.
	BL	·panic64(SB)
ok:
	RET
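
// ·panic64 is defined in Go elsewhere in this package.  A plausible sketch
// of what it does (the exact message is an assumption, not taken from the
// source):
//
//	func panic64() {
//		panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")
//	}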

// Fast, cached version of check.  No frame, just MOVW CMP RET after first time.
TEXT	fastCheck64<>(SB),7,$-4
	MOVW	ok64<>(SB), R0
	CMP	$0, R0	// have we been here before?
	RET.NE
	B	slowCheck64<>(SB)

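// slowCheck64 is split out so that fastCheck64 can stay frameless on its
// fast path: fastCheck64 tail-branches here, and this function makes the
// actual BL to check64 and then records success in ok64<> so later callers
// return immediately.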
TEXT slowCheck64<>(SB),7,$0
	BL	check64<>(SB)
	// Still here, must be okay.
	MOVW	$1, R0
	MOVW	R0, ok64<>(SB)
	RET

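// ok64 is the 4-byte flag read by fastCheck64 and set to 1 by slowCheck64
// once the LDREXD/STREXD self-test has passed; it is zero-initialized BSS,
// so the first 64-bit operation always takes the slow path.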
GLOBL ok64<>(SB), $4