github.com/aloncn/graphics-go@v0.0.1/src/runtime/internal/atomic/asm_386.s (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "textflag.h"
     6  
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
//
// CMPXCHGL implicitly compares AX with the memory operand; on equality
// it stores CX there and sets ZF, so SETEQ materializes the boolean
// result directly into the return slot.
TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHG compares against AX by definition
	MOVL	new+8(FP), CX
	LOCK			// make the compare-exchange atomic across CPUs
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)	// ZF set iff the swap happened
	RET
    22  
// Casuintptr tail-calls Cas: on 386 uintptr is 32 bits, so the frame
// layout ($0-13) is identical.
TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
	JMP	runtime∕internal∕atomic·Cas(SB)
    25  
// Loaduintptr tail-calls Load (defined elsewhere): on 386 uintptr is a
// 32-bit word, so the frame layout ($0-8) matches.
TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)
    28  
// Loaduint tail-calls Load (defined elsewhere): on 386 uint is a
// 32-bit word, so the frame layout ($0-8) matches.
TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Load(SB)
    31  
// Storeuintptr tail-calls Store: on 386 uintptr is a 32-bit word, so
// the frame layout ($0-8) matches.
TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	runtime∕internal∕atomic·Store(SB)
    34  
// Xadduintptr tail-calls Xadd: on 386 uintptr is a 32-bit word, so
// the frame layout ($0-8 args, plus the return slot) matches.
TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-8
	JMP runtime∕internal∕atomic·Xadd(SB)
    37  
// Loadint64 tail-calls Load64: int64 and uint64 share the same 8-byte
// representation, so the frame layout ($0-16) is identical.
TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
	JMP runtime∕internal∕atomic·Load64(SB)
    40  
// Xaddint64 tail-calls Xadd64 (defined elsewhere): int64 and uint64
// share the same 8-byte representation, so the frame layout matches.
TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-16
	JMP runtime∕internal∕atomic·Xadd64(SB)
    43  
    44  
// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
//
// CMPXCHG8B compares DX:AX against the 8-byte memory operand; on
// equality it stores CX:BX there and sets ZF. These register pairs are
// fixed by the instruction encoding.
TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
	MOVL	ptr+0(FP), BP	// BP is usable as scratch: no frame, no calls
	MOVL	old_lo+4(FP), AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX
	MOVL	new_hi+16(FP), CX
	LOCK			// make the 8-byte compare-exchange atomic
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)	// ZF set iff the swap happened
	RET
    63  
// bool Casp(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
//
// Same instruction sequence as Cas: on 386 a pointer is a 32-bit word.
TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHG compares against AX by definition
	MOVL	new+8(FP), CX
	LOCK			// make the compare-exchange atomic across CPUs
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)	// ZF set iff the swap happened
	RET
    79  
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX		// keep a copy of delta; XADD overwrites AX
	LOCK
	XADDL	AX, 0(BX)	// AX = old *val; *val += delta, atomically
	ADDL	CX, AX		// new value = old value + delta
	MOVL	AX, ret+8(FP)
	RET
    93  
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically stores new into *ptr and returns the previous value.
// XCHG with a memory operand is implicitly LOCKed, so no LOCK prefix
// is needed.
TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)	// AX = old *ptr; *ptr = new, atomically
	MOVL	AX, ret+8(FP)
	RET
   100  
// Xchguintptr tail-calls Xchg: on 386 uintptr is a 32-bit word, so
// the frame layout ($0-12) matches.
TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	runtime∕internal∕atomic·Xchg(SB)
   103  
   104  
// Storep1 stores the 32-bit word val into *ptr (pointer store on 386).
// XCHG with a memory operand is implicitly LOCKed, which makes this a
// store with full-barrier semantics; the old value in AX is discarded.
TEXT runtime∕internal∕atomic·Storep1(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// atomic store + full memory barrier
	RET
   110  
// Store stores the 32-bit word val into *ptr.
// XCHG with a memory operand is implicitly LOCKed, which makes this a
// store with full-barrier semantics; the old value in AX is discarded.
TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// atomic store + full memory barrier
	RET
   116  
// uint64 atomicload64(uint64 volatile* addr);
//
// Performs the 8-byte load with an MMX MOVQ, hand-encoded in BYTEs
// because the 386 assembler has no MMX mnemonics. The pointer must be
// 8-byte aligned; a misaligned pointer is turned into a deliberate
// nil-dereference crash rather than a silently torn load.
TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX		// enforce 8-byte alignment
	JZ	2(PC)		// aligned: skip the crash instruction
	MOVL	0, AX // crash with nil ptr deref
	LEAL	ret_lo+4(FP), BX	// BX = address of the return slot
	// MOVQ (%EAX), %MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x00
	// MOVQ %MM0, 0(%EBX)
	BYTE $0x0f; BYTE $0x7f; BYTE $0x03
	// EMMS
	BYTE $0x0F; BYTE $0x77
	RET
   131  
// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
//
// Performs the 8-byte store with an MMX MOVQ (hand-encoded in BYTEs;
// the 386 assembler has no MMX mnemonics), then issues a LOCKed
// no-op read-modify-write of the stack to provide full-barrier
// ordering. The pointer must be 8-byte aligned; a misaligned pointer
// is turned into a deliberate nil-dereference crash.
TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX		// enforce 8-byte alignment
	JZ	2(PC)		// aligned: skip the crash instruction
	MOVL	0, AX // crash with nil ptr deref
	// MOVQ and EMMS were introduced on the Pentium MMX.
	// MOVQ 0x8(%ESP), %MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
	// MOVQ %MM0, (%EAX)
	BYTE $0x0f; BYTE $0x7f; BYTE $0x00
	// EMMS
	BYTE $0x0F; BYTE $0x77
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	MOVL	$0, AX
	LOCK
	XADDL	AX, (SP)	// LOCKed RMW of our own stack word = full barrier
	RET
   151  
// void	runtime∕internal∕atomic·Or8(byte volatile*, byte);
// Atomically: *ptr |= val.
TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK			// make the read-modify-write atomic
	ORB	BX, (AX)
	RET
   159  
// void	runtime∕internal∕atomic·And8(byte volatile*, byte);
// Atomically: *ptr &= val.
TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK			// make the read-modify-write atomic
	ANDB	BX, (AX)
	RET
   166  	RET