github.com/primecitizens/pcz/std@v0.2.1/core/atomic/atomic_amd64.s (about)

     1  // SPDX-License-Identifier: Apache-2.0
     2  // Copyright 2023 The Prime Citizens
     3  //
     4  // Copyright 2015 The Go Authors. All rights reserved.
     5  // Use of this source code is governed by a BSD-style
     6  // license that can be found in the LICENSE file.
     7  
     8  // Note: some of these functions are semantically inlined
     9  // by the compiler (in src/cmd/compile/internal/gc/ssa.go).
    10  
    11  //go:build amd64
    12  
    13  #include "textflag.h"
    14  
    15  TEXT ·PublicationBarrier(SB),NOSPLIT,$0-0	// func PublicationBarrier()
    16  	// Stores are already ordered on x86, so this is just a
    17  	// compile barrier.
    18  	RET	// the call boundary itself prevents compiler reordering; no fence instruction needed on x86-TSO
    19  
    20  TEXT ·Store32(SB), NOSPLIT, $0-12	// func Store32(ptr *uint32, val uint32)
    21  	MOVQ ptr+0(FP), BX	// BX = ptr
    22  	MOVL val+8(FP), AX	// AX = val
    23  	XCHGL AX, 0(BX)	// XCHG with a memory operand is implicitly LOCKed: atomic store + full barrier
    24  	RET
    25  
    26  TEXT ·Store8(SB), NOSPLIT, $0-9	// func Store8(ptr *uint8, val uint8)
    27  	MOVQ ptr+0(FP), BX	// BX = ptr
    28  	MOVB val+8(FP), AX	// AL = val
    29  	XCHGB AX, 0(BX)	// implicitly LOCKed byte exchange: atomic store + full barrier
    30  	RET
    31  
    32  TEXT ·Store64(SB), NOSPLIT, $0-16	// func Store64(ptr *uint64, val uint64)
    33  	MOVQ ptr+0(FP), BX	// BX = ptr
    34  	MOVQ val+8(FP), AX	// AX = val
    35  	XCHGQ AX, 0(BX)	// implicitly LOCKed exchange: atomic store + full barrier
    36  	RET
    37  
    38  TEXT ·StoreUintptr(SB), NOSPLIT, $0-16	// uintptr is 64-bit on amd64
    39  	JMP ·Store64(SB)	// tail jump: identical frame layout ($0-16)
    40  
    41  TEXT ·StorePointer(SB), NOSPLIT, $0-16	// func StorePointer(ptr *unsafe.Pointer, val unsafe.Pointer)
    42  	MOVQ ptr+0(FP), BX	// BX = ptr
    43  	MOVQ val+8(FP), AX	// AX = val
    44  	XCHGQ AX, 0(BX)	// same sequence as Store64; NOTE(review): no GC write barrier is performed here — confirm callers don't need one
    45  	RET
    46  
    47  TEXT ·StoreInt32(SB), NOSPLIT, $0-12	// signed variant; bit pattern and frame layout match Store32
    48  	JMP ·Store32(SB)
    49  
    50  TEXT ·StoreInt64(SB), NOSPLIT, $0-16	// signed variant; bit pattern and frame layout match Store64
    51  	JMP ·Store64(SB)
    52  
    53  //
    54  // StoreRel
    55  //
    56  
    57  TEXT ·StoreRel32(SB), NOSPLIT, $0-12	// release-ordered store; the seq-cst store is strictly stronger, so aliasing is correct
    58  	JMP ·Store32(SB)
    59  
    60  TEXT ·StoreRel64(SB), NOSPLIT, $0-16	// release-ordered 64-bit store
    61  	JMP ·Store64(SB)
    62  
    63  TEXT ·StoreRelUintptr(SB), NOSPLIT, $0-16	// uintptr is 64-bit on amd64
    64  	JMP ·Store64(SB)
    65  
    66  //
    67  // Load
    68  //
    69  
    70  TEXT ·LoadUintptr(SB), NOSPLIT, $0-16	// uintptr is 64-bit on amd64
    71  	JMP ·Load64(SB)	// NOTE(review): ·Load64/·Load32 are not defined in this file — presumably elsewhere in the package
    72  
    73  TEXT ·LoadUint(SB), NOSPLIT, $0-16	// uint is 64-bit on amd64
    74  	JMP ·Load64(SB)
    75  
    76  TEXT ·LoadInt32(SB), NOSPLIT, $0-12	// signed variant; bit pattern and frame layout match Load32
    77  	JMP ·Load32(SB)
    78  
    79  TEXT ·LoadInt64(SB), NOSPLIT, $0-16	// signed variant; bit pattern and frame layout match Load64
    80  	JMP ·Load64(SB)
    81  
    82  //
    83  // bitwise
    84  //
    85  
    86  // void ·Or8(byte volatile*, byte);
    87  TEXT ·Or8(SB), NOSPLIT, $0-9	// *ptr |= val, atomically; no return value
    88  	MOVQ ptr+0(FP), AX	// AX = ptr
    89  	MOVB val+8(FP), BX	// BL = val
    90  	LOCK
    91  	ORB BX, (AX)	// atomic read-modify-write
    92  	RET
    93  
    94  // func Or32(addr *uint32, v uint32)
    95  TEXT ·Or32(SB), NOSPLIT, $0-12	// *ptr |= val, atomically; no return value
    96  	MOVQ ptr+0(FP), AX	// AX = ptr
    97  	MOVL val+8(FP), BX	// BX = val
    98  	LOCK
    99  	ORL BX, (AX)	// atomic read-modify-write
   100  	RET
   101  
   102  // void ·And8(byte volatile*, byte);
   103  TEXT ·And8(SB), NOSPLIT, $0-9	// *ptr &= val, atomically; no return value
   104  	MOVQ ptr+0(FP), AX	// AX = ptr
   105  	MOVB val+8(FP), BX	// BL = val
   106  	LOCK
   107  	ANDB BX, (AX)	// atomic read-modify-write
   108  	RET
   109  
   110  // func And32(addr *uint32, v uint32)
   111  TEXT ·And32(SB), NOSPLIT, $0-12	// *ptr &= val, atomically; no return value
   112  	MOVQ ptr+0(FP), AX	// AX = ptr
   113  	MOVL val+8(FP), BX	// BX = val
   114  	LOCK
   115  	ANDL BX, (AX)	// atomic read-modify-write
   116  	RET
   117  
   118  //
   119  // Swap
   120  //
   121  
   122  // uint32 Swap32(ptr *uint32, new uint32)
   123  TEXT ·Swap32(SB), NOSPLIT, $0-20	// returns the previous value of *ptr
   124  	MOVQ ptr+0(FP), BX	// BX = ptr
   125  	MOVL new+8(FP), AX	// AX = new
   126  	XCHGL AX, 0(BX)	// implicitly LOCKed: AX = old *ptr; *ptr = new
   127  	MOVL AX, ret+16(FP)	// ret is 8-aligned, hence offset 16 rather than 12
   128  	RET
   129  
   130  // uint64 Swap64(ptr *uint64, new uint64)
   131  TEXT ·Swap64(SB), NOSPLIT, $0-24	// returns the previous value of *ptr
   132  	MOVQ ptr+0(FP), BX	// BX = ptr
   133  	MOVQ new+8(FP), AX	// AX = new
   134  	XCHGQ AX, 0(BX)	// implicitly LOCKed: AX = old *ptr; *ptr = new
   135  	MOVQ AX, ret+16(FP)
   136  	RET
   137  
   138  TEXT ·SwapUintptr(SB), NOSPLIT, $0-24	// uintptr is 64-bit on amd64
   139  	JMP ·Swap64(SB)	// tail jump: identical frame layout
   140  
   141  TEXT ·SwapInt32(SB), NOSPLIT, $0-20	// signed variant; bit pattern and frame layout match Swap32
   142  	JMP ·Swap32(SB)
   143  
   144  TEXT ·SwapInt64(SB), NOSPLIT, $0-24	// signed variant; bit pattern and frame layout match Swap64
   145  	JMP ·Swap64(SB)
   146  
   147  //
   148  // Add
   149  //
   150  
   151  // uint32 Add32(uint32 volatile *val, int32 delta)
   152  TEXT ·Add32(SB), NOSPLIT, $0-20	// returns the NEW value, old *ptr + delta
   153  	MOVQ ptr+0(FP), BX	// BX = ptr
   154  	MOVL delta+8(FP), AX	// AX = delta
   155  	MOVL AX, CX	// save delta: XADD overwrites its source register with the old value
   156  	LOCK
   157  	XADDL AX, 0(BX)	// atomically: AX = old *ptr; *ptr = old + delta
   158  	ADDL CX, AX	// AX = old + delta = new value
   159  	MOVL AX, ret+16(FP)	// ret is 8-aligned, hence offset 16 rather than 12
   160  	RET
   161  
   162  // uint64 Add64(uint64 volatile *val, int64 delta)
   163  TEXT ·Add64(SB), NOSPLIT, $0-24	// returns the NEW value, old *ptr + delta
   164  	MOVQ ptr+0(FP), BX	// BX = ptr
   165  	MOVQ delta+8(FP), AX	// AX = delta
   166  	MOVQ AX, CX	// save delta: XADD overwrites its source register with the old value
   167  	LOCK
   168  	XADDQ AX, 0(BX)	// atomically: AX = old *ptr; *ptr = old + delta
   169  	ADDQ CX, AX	// AX = old + delta = new value
   170  	MOVQ AX, ret+16(FP)
   171  	RET
   172  
   173  TEXT ·AddUintptr(SB), NOSPLIT, $0-24	// uintptr is 64-bit on amd64
   174  	JMP ·Add64(SB)	// tail jump: identical frame layout
   175  
   176  TEXT ·AddInt32(SB), NOSPLIT, $0-20	// signed variant: two's-complement add is the same bit operation
   177  	JMP ·Add32(SB)
   178  
   179  TEXT ·AddInt64(SB), NOSPLIT, $0-24	// signed variant: two's-complement add is the same bit operation
   180  	JMP ·Add64(SB)
   181  
   182  //
   183  // Compare and swap
   184  //
   185  
   186  // bool Cas32(int32 *val, int32 old, int32 new)
   187  TEXT ·Cas32(SB),NOSPLIT,$0-17	// bool result byte at offset 16
   188  	MOVQ ptr+0(FP), BX	// BX = ptr
   189  	MOVL old+8(FP), AX	// CMPXCHG implicitly compares *ptr against AX
   190  	MOVL new+12(FP), CX
   191  	LOCK
   192  	CMPXCHGL CX, 0(BX)	// if *ptr == AX { *ptr = CX; ZF=1 } else { AX = *ptr; ZF=0 }
   193  	SETEQ ret+16(FP)	// ret = true iff the swap happened
   194  	RET
   195  
   196  // bool ·Cas64(uint64 *val, uint64 old, uint64 new)
   197  TEXT ·Cas64(SB), NOSPLIT, $0-25	// bool result byte at offset 24
   198  	MOVQ ptr+0(FP), BX	// BX = ptr
   199  	MOVQ old+8(FP), AX	// CMPXCHG implicitly compares *ptr against AX
   200  	MOVQ new+16(FP), CX
   201  	LOCK
   202  	CMPXCHGQ CX, 0(BX)	// if *ptr == AX { *ptr = CX; ZF=1 } else { AX = *ptr; ZF=0 }
   203  	SETEQ ret+24(FP)	// ret = true iff the swap happened
   204  	RET
   205  
   206  TEXT ·CasUintptr(SB), NOSPLIT, $0-25	// uintptr is 64-bit on amd64
   207  	JMP ·Cas64(SB)	// tail jump: identical frame layout
   208  
   209  // bool CasUnsafePointer(void **val, void *old, void *new)
   210  TEXT ·CasUnsafePointer(SB), NOSPLIT, $0-25	// same sequence as Cas64; NOTE(review): no GC write barrier here — confirm callers don't need one
   211  	MOVQ ptr+0(FP), BX	// BX = ptr
   212  	MOVQ old+8(FP), AX	// CMPXCHG implicitly compares *ptr against AX
   213  	MOVQ new+16(FP), CX
   214  	LOCK
   215  	CMPXCHGQ CX, 0(BX)	// if *ptr == AX { *ptr = CX; ZF=1 } else { AX = *ptr; ZF=0 }
   216  	SETEQ ret+24(FP)	// ret = true iff the swap happened
   217  	RET
   218  
   219  TEXT ·CasInt32(SB), NOSPLIT, $0-17	// signed variant; bit pattern and frame layout match Cas32
   220  	JMP ·Cas32(SB)
   221  
   222  TEXT ·CasInt64(SB), NOSPLIT, $0-25	// signed variant; bit pattern and frame layout match Cas64
   223  	JMP ·Cas64(SB)
   224  
   225  //
   226  // CasRel
   227  //
   228  
   229  TEXT ·CasRel32(SB), NOSPLIT, $0-17	// release-ordered CAS; LOCK CMPXCHG is already a full barrier on x86, so aliasing is correct
   230  	JMP ·Cas32(SB)