github.com/primecitizens/pcz/std@v0.2.1/core/atomic/atomic_386.s

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens
//
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build 386

#include "textflag.h"
#include "funcdata.h"

TEXT ·PublicationBarrier(SB), NOSPLIT, $0-0
	// Stores are already ordered on x86, so this is just a
	// compile barrier.
	RET

//
// Store
//

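// Note: XCHG with a memory operand implicitly asserts LOCK, so the plain
// stores below are sequentially consistent without an explicit fence.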
TEXT ·Store8(SB), NOSPLIT, $0-5
	MOVL ptr+0(FP), BX
	MOVB val+4(FP), AX
	XCHGB AX, 0(BX)
	RET

TEXT ·Store32(SB), NOSPLIT, $0-8
	MOVL ptr+0(FP), BX
	MOVL val+4(FP), AX
	XCHGL AX, 0(BX)
	RET

// void ·Store64(uint64 volatile* addr, uint64 v);
TEXT ·Store64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL ptr+0(FP), AX
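	// 64-bit atomics must be 8-byte aligned; panic otherwise.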
	TESTL $7, AX
	JZ 2(PC)
	CALL ·panicUnaligned(SB)
	// MOVQ and EMMS were introduced on the Pentium MMX.
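	// An aligned 8-byte MMX store is a single memory access and is
	// therefore atomic.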
	MOVQ val+4(FP), M0
	MOVQ M0, (AX)
	EMMS
	// The LOCKed add of zero below is essentially a no-op, but its LOCK
	// prefix provides the required memory fencing. It could be an MFENCE,
	// but MFENCE was only introduced with SSE2 on the Pentium 4.
	XORL AX, AX
	LOCK
	XADDL AX, (SP)
	RET

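// On 386, uintptr and pointers are 32 bits wide, so the typed variants
// below forward to the 32- and 64-bit implementations above.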
TEXT ·StoreUintptr(SB), NOSPLIT, $0-8
	JMP ·Store32(SB)

TEXT ·StorePointer(SB), NOSPLIT, $0-8
	MOVL ptr+0(FP), BX
	MOVL val+4(FP), AX
	XCHGL AX, 0(BX)
	RET

TEXT ·StoreInt32(SB), NOSPLIT, $0-8
	JMP ·Store32(SB)

TEXT ·StoreInt64(SB), NOSPLIT, $0-12
	JMP ·Store64(SB)

//
// StoreRel
//

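// x86 stores are not reordered with earlier loads or stores (TSO), so a
// plain atomic store already has release semantics.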
TEXT ·StoreRel32(SB), NOSPLIT, $0-8
	JMP ·Store32(SB)

TEXT ·StoreRelUintptr(SB), NOSPLIT, $0-8
	JMP ·Store32(SB)

//
// Load
//

// uint64 ·Load64(uint64 volatile* addr);
TEXT ·Load64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL ptr+0(FP), AX
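	// 64-bit atomics must be 8-byte aligned; panic otherwise.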
	TESTL $7, AX
	JZ 2(PC)
	CALL ·panicUnaligned(SB)
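	// An aligned 8-byte MMX load is a single memory access and is
	// therefore atomic.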
	MOVQ (AX), M0
	MOVQ M0, ret+4(FP)
	EMMS
	RET

TEXT ·LoadUintptr(SB), NOSPLIT, $0-8
	JMP ·Load32(SB)

TEXT ·LoadUint(SB), NOSPLIT, $0-8
	JMP ·Load32(SB)

TEXT ·LoadInt32(SB), NOSPLIT, $0-8
	JMP ·Load32(SB)

TEXT ·LoadInt64(SB), NOSPLIT, $0-12
	JMP ·Load64(SB)

//
// bitwise
//

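// The LOCK prefix makes each read-modify-write below atomic; the old value
// is not returned.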
// void ·Or8(byte volatile*, byte);
TEXT ·Or8(SB), NOSPLIT, $0-5
	MOVL ptr+0(FP), AX
	MOVB val+4(FP), BX
	LOCK
	ORB BX, (AX)
	RET

// func Or32(addr *uint32, v uint32)
TEXT ·Or32(SB), NOSPLIT, $0-8
	MOVL ptr+0(FP), AX
	MOVL val+4(FP), BX
	LOCK
	ORL BX, (AX)
	RET

// void ·And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-5
	MOVL ptr+0(FP), AX
	MOVB val+4(FP), BX
	LOCK
	ANDB BX, (AX)
	RET

// func And32(addr *uint32, v uint32)
TEXT ·And32(SB), NOSPLIT, $0-8
	MOVL ptr+0(FP), AX
	MOVL val+4(FP), BX
	LOCK
	ANDL BX, (AX)
	RET

//
// Swap
//

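// XCHG with a memory operand is implicitly LOCKed, so Swap32 needs no
// explicit LOCK prefix.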
TEXT ·Swap32(SB), NOSPLIT, $0-12
	MOVL ptr+0(FP), BX
	MOVL new+4(FP), AX
	XCHGL AX, 0(BX)
	MOVL AX, ret+8(FP)
	RET

TEXT ·Swap64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XCHGQ so use CMPXCHG8B loop
	MOVL ptr+0(FP), BP
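	// 64-bit atomics must be 8-byte aligned; panic otherwise.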
	TESTL $7, BP
	JZ 2(PC)
	CALL ·panicUnaligned(SB)
	// CX:BX = new
	MOVL new_lo+4(FP), BX
	MOVL new_hi+8(FP), CX
	// DX:AX = *addr
	MOVL 0(BP), AX
	MOVL 4(BP), DX
swaploop:
	// if *addr == DX:AX
	//	*addr = CX:BX
	// else
	//	DX:AX = *addr
	// all in one instruction
	LOCK
	CMPXCHG8B 0(BP)
	JNZ swaploop

	// success
	// return DX:AX
	MOVL AX, ret_lo+12(FP)
	MOVL DX, ret_hi+16(FP)
	RET

TEXT ·SwapUintptr(SB), NOSPLIT, $0-12
	JMP ·Swap32(SB)

TEXT ·SwapInt32(SB), NOSPLIT, $0-12
	JMP ·Swap32(SB)

TEXT ·SwapInt64(SB), NOSPLIT, $0-20
	JMP ·Swap64(SB)

//
// Add
//

// uint32 Add32(uint32 volatile *val, int32 delta)
TEXT ·Add32(SB), NOSPLIT, $0-12
	MOVL ptr+0(FP), BX
	MOVL delta+4(FP), AX
	MOVL AX, CX
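	// LOCK XADDL exchanges AX with *ptr and stores the sum, leaving the
	// old value in AX; adding the saved delta (CX) yields the new value.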
	LOCK
	XADDL AX, 0(BX)
	ADDL CX, AX
	MOVL AX, ret+8(FP)
	RET

TEXT ·Add64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XADDQ so use CMPXCHG8B loop
	MOVL ptr+0(FP), BP
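	// 64-bit atomics must be 8-byte aligned; panic otherwise.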
	TESTL $7, BP
	JZ 2(PC)
	CALL ·panicUnaligned(SB)
	// DI:SI = delta
	MOVL delta_lo+4(FP), SI
	MOVL delta_hi+8(FP), DI
	// DX:AX = *addr
	MOVL 0(BP), AX
	MOVL 4(BP), DX
addloop:
	// CX:BX = DX:AX (*addr) + DI:SI (delta)
	MOVL AX, BX
	MOVL DX, CX
	ADDL SI, BX
	ADCL DI, CX

	// if *addr == DX:AX {
	//	*addr = CX:BX
	// } else {
	//	DX:AX = *addr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B 0(BP)

	JNZ addloop

	// success
	// return CX:BX
	MOVL BX, ret_lo+12(FP)
	MOVL CX, ret_hi+16(FP)
	RET

TEXT ·AddUintptr(SB), NOSPLIT, $0-12
	JMP ·Add32(SB)

TEXT ·AddInt32(SB), NOSPLIT, $0-12
	JMP ·Add32(SB)

TEXT ·AddInt64(SB), NOSPLIT, $0-20
	JMP ·Add64(SB)

//
// Compare and swap
//

// bool Cas32(uint32 *val, uint32 old, uint32 new)
TEXT ·Cas32(SB), NOSPLIT, $0-13
	MOVL ptr+0(FP), BX
	MOVL old+4(FP), AX
	MOVL new+8(FP), CX
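	// CMPXCHGL compares AX with *ptr: if equal it stores CX and sets ZF,
	// otherwise it loads *ptr into AX and clears ZF.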
	LOCK
	CMPXCHGL CX, 0(BX)
	SETEQ ret+12(FP)
	RET

// bool ·Cas64(uint64 *val, uint64 old, uint64 new)
TEXT ·Cas64(SB), NOSPLIT, $0-21
	NO_LOCAL_POINTERS
	MOVL ptr+0(FP), BP
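	// 64-bit atomics must be 8-byte aligned; panic otherwise.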
	TESTL $7, BP
	JZ 2(PC)
	CALL ·panicUnaligned(SB)
	MOVL old_lo+4(FP), AX
	MOVL old_hi+8(FP), DX
	MOVL new_lo+12(FP), BX
	MOVL new_hi+16(FP), CX
	LOCK
	CMPXCHG8B 0(BP)
	SETEQ ret+20(FP)
	RET

TEXT ·CasUintptr(SB), NOSPLIT, $0-13
	JMP ·Cas32(SB)

// bool CasUnsafePointer(void **p, void *old, void *new)
TEXT ·CasUnsafePointer(SB), NOSPLIT, $0-13
	MOVL ptr+0(FP), BX
	MOVL old+4(FP), AX
	MOVL new+8(FP), CX
	LOCK
	CMPXCHGL CX, 0(BX)
	SETEQ ret+12(FP)
	RET

TEXT ·CasInt32(SB), NOSPLIT, $0-13
	JMP ·Cas32(SB)

TEXT ·CasInt64(SB), NOSPLIT, $0-21
	JMP ·Cas64(SB)

//
// CasRel
//

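// A LOCKed CMPXCHG is already a full barrier on x86, so the release
// variant is identical to Cas32.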
TEXT ·CasRel32(SB), NOSPLIT, $0-13
	JMP ·Cas32(SB)