github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/asm_amd64.s (about)

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Note: some of these functions are semantically inlined
     6  // by the compiler (in src/cmd/compile/internal/gc/ssa.go).
     7  
     8  #include "textflag.h"
     9  
    10  // bool Cas(int32 *val, int32 old, int32 new)
    11  // Atomically:
    12  //	if(*val == old){
    13  //		*val = new;
    14  //		return 1;
    15  //	} else
    16  //		return 0;
    17  TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0-17
    18  	MOVQ	ptr+0(FP), BX
    19  	MOVL	old+8(FP), AX	// CMPXCHGL compares memory against AX (architecturally implicit operand).
    20  	MOVL	new+12(FP), CX
    21  	LOCK
    22  	CMPXCHGL	CX, 0(BX)	// if *ptr == AX { *ptr = CX; ZF = 1 } else { AX = *ptr; ZF = 0 }
    23  	SETEQ	ret+16(FP)	// Result is ZF: 1 iff the swap happened.
    24  	RET
    25  
    26  // bool	runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
    27  // Atomically:
    28  //	if(*val == old){
    29  //		*val = new;
    30  //		return 1;
    31  //	} else {
    32  //		return 0;
    33  //	}
    34  TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-25
    35  	MOVQ	ptr+0(FP), BX
    36  	MOVQ	old+8(FP), AX	// CMPXCHGQ compares memory against AX (architecturally implicit operand).
    37  	MOVQ	new+16(FP), CX
    38  	LOCK
    39  	CMPXCHGQ	CX, 0(BX)	// if *ptr == AX { *ptr = CX; ZF = 1 } else { AX = *ptr; ZF = 0 }
    40  	SETEQ	ret+24(FP)	// Result is ZF: 1 iff the swap happened.
    41  	RET
    42  
    43  TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-25
    44  	JMP	runtime∕internal∕atomic·Cas64(SB)	// uintptr is 64-bit on amd64; same frame layout as Cas64.
    45  
    46  TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-17
    47  	JMP	runtime∕internal∕atomic·Cas(SB)	// Plain x86 CAS is already sequentially consistent, which subsumes release semantics.
    48  
    49  TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-16
    50  	JMP	runtime∕internal∕atomic·Load64(SB)	// uintptr is 64-bit on amd64.
    51  
    52  TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-16
    53  	JMP	runtime∕internal∕atomic·Load64(SB)	// uint is 64-bit on amd64.
    54  
    55  TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-16
    56  	JMP	runtime∕internal∕atomic·Store64(SB)	// uintptr is 64-bit on amd64.
    57  
    58  TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-16
    59  	JMP	runtime∕internal∕atomic·Load64(SB)	// int64 and uint64 share a representation; load is sign-agnostic.
    60  
    61  TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-24
    62  	JMP	runtime∕internal∕atomic·Xadd64(SB)	// Two's-complement add is identical for int64 and uint64.
    63  
    64  // bool Casp1(void **val, void *old, void *new)
    65  // Atomically:
    66  //	if(*val == old){
    67  //		*val = new;
    68  //		return 1;
    69  //	} else
    70  //		return 0;
    71  TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-25
    72  	MOVQ	ptr+0(FP), BX
    73  	MOVQ	old+8(FP), AX	// CMPXCHGQ compares memory against AX (architecturally implicit operand).
    74  	MOVQ	new+16(FP), CX
    75  	LOCK
    76  	CMPXCHGQ	CX, 0(BX)	// Raw pointer CAS: no GC write barrier is performed here.
    77  	SETEQ	ret+24(FP)	// Result is ZF: 1 iff the swap happened.
    78  	RET
    79  
    80  // uint32 Xadd(uint32 volatile *val, int32 delta)
    81  // Atomically:
    82  //	*val += delta;
    83  //	return *val;
    84  TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-20
    85  	MOVQ	ptr+0(FP), BX
    86  	MOVL	delta+8(FP), AX
    87  	MOVL	AX, CX	// Save delta: XADD overwrites AX with the old value.
    88  	LOCK
    89  	XADDL	AX, 0(BX)	// Atomically: AX = old *ptr; *ptr = old + delta.
    90  	ADDL	CX, AX	// Return the new value: old + delta.
    91  	MOVL	AX, ret+16(FP)
    92  	RET
    93  
    94  // uint64 Xadd64(uint64 volatile *val, int64 delta)
    94  TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-24
    95  	MOVQ	ptr+0(FP), BX
    96  	MOVQ	delta+8(FP), AX
    97  	MOVQ	AX, CX	// Save delta: XADD overwrites AX with the old value.
    98  	LOCK
    99  	XADDQ	AX, 0(BX)	// Atomically: AX = old *ptr; *ptr = old + delta.
   100  	ADDQ	CX, AX	// Return the new value: old + delta.
   101  	MOVQ	AX, ret+16(FP)
   102  	RET
   103  
   104  TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-24
   105  	JMP	runtime∕internal∕atomic·Xadd64(SB)	// uintptr is 64-bit on amd64; same frame layout as Xadd64.
   106  
   107  // uint32 Xchg(uint32 volatile *val, uint32 new): swap in new, return the old value.
   107  TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-20
   108  	MOVQ	ptr+0(FP), BX
   109  	MOVL	new+8(FP), AX
   110  	XCHGL	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed; no prefix needed.
   111  	MOVL	AX, ret+16(FP)	// AX now holds the previous *ptr.
   112  	RET
   113  
   114  // uint64 Xchg64(uint64 volatile *val, uint64 new): swap in new, return the old value.
   114  TEXT runtime∕internal∕atomic·Xchg64(SB), NOSPLIT, $0-24
   115  	MOVQ	ptr+0(FP), BX
   116  	MOVQ	new+8(FP), AX
   117  	XCHGQ	AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed; no prefix needed.
   118  	MOVQ	AX, ret+16(FP)	// AX now holds the previous *ptr.
   119  	RET
   120  
   121  TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-24
   122  	JMP	runtime∕internal∕atomic·Xchg64(SB)	// uintptr is 64-bit on amd64; same frame layout as Xchg64.
   123  
   124  // void StorepNoWB(void **ptr, void *val): pointer store with NO GC write barrier.
   124  TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-16
   125  	MOVQ	ptr+0(FP), BX
   126  	MOVQ	val+8(FP), AX
   127  	XCHGQ	AX, 0(BX)	// XCHG (implicitly LOCKed) rather than MOV, so the store is also a full memory barrier.
   128  	RET
   129  
   130  // void Store(uint32 *ptr, uint32 val): sequentially consistent 32-bit store.
   130  TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-12
   131  	MOVQ	ptr+0(FP), BX
   132  	MOVL	val+8(FP), AX
   133  	XCHGL	AX, 0(BX)	// XCHG (implicitly LOCKed) rather than MOV, so the store is also a full memory barrier.
   134  	RET
   135  
   136  TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-12
   137  	JMP	runtime∕internal∕atomic·Store(SB)	// The seq-cst Store subsumes release semantics.
   138  
   139  TEXT runtime∕internal∕atomic·StoreRel64(SB), NOSPLIT, $0-16
   140  	JMP	runtime∕internal∕atomic·Store64(SB)	// The seq-cst Store64 subsumes release semantics.
   141  
   142  TEXT runtime∕internal∕atomic·StoreReluintptr(SB), NOSPLIT, $0-16
   143  	JMP	runtime∕internal∕atomic·Store64(SB)	// uintptr is 64-bit on amd64.
   144  
   145  // void Store8(uint8 *ptr, uint8 val): sequentially consistent 8-bit store.
   145  TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-9
   146  	MOVQ	ptr+0(FP), BX
   147  	MOVB	val+8(FP), AX
   148  	XCHGB	AX, 0(BX)	// XCHG (implicitly LOCKed) rather than MOV, so the store is also a full memory barrier.
   149  	RET
   150  
   151  // void Store64(uint64 *ptr, uint64 val): sequentially consistent 64-bit store.
   151  TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-16
   152  	MOVQ	ptr+0(FP), BX
   153  	MOVQ	val+8(FP), AX
   154  	XCHGQ	AX, 0(BX)	// XCHG (implicitly LOCKed) rather than MOV, so the store is also a full memory barrier.
   155  	RET
   156  
   157  // void	runtime∕internal∕atomic·Or8(byte volatile*, byte);
   158  // Atomically *ptr |= val. No return value.
   158  TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-9
   159  	MOVQ	ptr+0(FP), AX
   160  	MOVB	val+8(FP), BX
   161  	LOCK
   162  	ORB	BX, (AX)	// LOCKed read-modify-write; the old value is not needed, so no XADD/CMPXCHG loop.
   163  	RET
   164  
   165  // void	runtime∕internal∕atomic·And8(byte volatile*, byte);
   166  // Atomically *ptr &= val. No return value.
   166  TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-9
   167  	MOVQ	ptr+0(FP), AX
   168  	MOVB	val+8(FP), BX
   169  	LOCK
   170  	ANDB	BX, (AX)	// LOCKed read-modify-write; the old value is not needed, so no XADD/CMPXCHG loop.
   171  	RET
   172  
   173  // func Or(addr *uint32, v uint32)
   174  // Atomically *addr |= v. No return value.
   174  TEXT runtime∕internal∕atomic·Or(SB), NOSPLIT, $0-12
   175  	MOVQ	ptr+0(FP), AX
   176  	MOVL	val+8(FP), BX
   177  	LOCK
   178  	ORL	BX, (AX)	// LOCKed read-modify-write; the old value is not needed, so no XADD/CMPXCHG loop.
   179  	RET
   180  
   181  // func And(addr *uint32, v uint32)
   182  // Atomically *addr &= v. No return value.
   182  TEXT runtime∕internal∕atomic·And(SB), NOSPLIT, $0-12
   183  	MOVQ	ptr+0(FP), AX
   184  	MOVL	val+8(FP), BX
   185  	LOCK
   186  	ANDL	BX, (AX)	// LOCKed read-modify-write; the old value is not needed, so no XADD/CMPXCHG loop.
   187  	RET
   187  	RET