github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/internal/atomic/asm_s390x.s

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// func Store(ptr *uint32, val uint32)
TEXT ·Store(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVWZ	val+8(FP), R3
	MOVW	R3, 0(R2)
	SYNC
	RET

// func Store8(ptr *uint8, val uint8)
TEXT ·Store8(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVB	val+8(FP), R3
	MOVB	R3, 0(R2)
	SYNC
	RET

// func Store64(ptr *uint64, val uint64)
TEXT ·Store64(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC
	RET

// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
TEXT ·StorepNoWB(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC
	RET
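
// Note: each store above is a plain store followed by SYNC, a serializing
// instruction, so the stored value is globally visible before any later
// memory accesses from this CPU. An illustrative (hypothetical, non-atomic)
// Go-level sketch of the intended semantics:
//
//	func storeSketch(ptr *uint32, val uint32) {
//		*ptr = val // plain store of the value
//		// SYNC: full barrier; publishes the store before later accesses.
//	}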

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	CS	R4, R5, 0(R3)    // if (R4 == 0(R3)) then 0(R3) = R5 else R4 = 0(R3)
	BNE	cas_fail
	MOVB	$1, ret+16(FP)
	RET
cas_fail:
	MOVB	$0, ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	CSG	R4, R5, 0(R3)    // if (R4 == 0(R3)) then 0(R3) = R5 else R4 = 0(R3)
	BNE	cas64_fail
	MOVB	$1, ret+24(FP)
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)
	RET

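// The next several functions are thin forwards: the uintptr, uint, int64 and
// pointer variants tail-jump to the 64-bit implementations (pointers, uintptr
// and uint are all 8 bytes on s390x, so the argument layouts match), and
// CasRel reuses Cas, which is at least as strong as the required release
// semantics.
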
// func Casuintptr(ptr *uintptr, old, new uintptr) bool
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func CasRel(ptr *uint32, old, new uint32) bool
TEXT ·CasRel(SB), NOSPLIT, $0-17
	BR	·Cas(SB)

// func Loaduintptr(ptr *uintptr) uintptr
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Loaduint(ptr *uint) uint
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Storeuintptr(ptr *uintptr, new uintptr)
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)

// func Loadint64(ptr *int64) int64
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Xaddint64(ptr *int64, delta int64) int64
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	MOVW	(R4), R3
repeat:
	ADD	R5, R3, R6
	CS	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
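// Atomically:
//	*ptr += delta
//	return *ptr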
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	MOVD	(R4), R3
repeat:
	ADD	R5, R3, R6
	CSG	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
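// Atomically:
//	old := *ptr
//	*ptr = new
//	return old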
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R3
	MOVW	(R4), R6
repeat:
	CS	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
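// Atomically:
//	old := *ptr
//	*ptr = new
//	return old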
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R3
	MOVD	(R4), R6
repeat:
	CSG	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)

// func Or8(ptr *uint8, val uint8)
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes so we
	// need to align ptr down to a word boundary and create a mask
	// containing val to OR with the entire word atomically.
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5 // R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3              // R3 = floor(ptr, 4) = ptr &^ 3
	SLW	R5, R4               // R4 = uint32(val) << R5
	LAO	R4, R6, 0(R3)        // R6 = *R3; *R3 |= R4; (atomic)
	RET
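
// Illustrative Go-level sketch (hypothetical, not part of the build) of the
// byte-lane arithmetic above, assuming s390x's big-endian byte order:
//
//	word := uintptr(unsafe.Pointer(ptr)) &^ 3          // word-aligned address
//	shift := (3 - uintptr(unsafe.Pointer(ptr))&3) * 8  // 24, 16, 8 or 0
//	mask := uint32(val) << shift                        // val in the addressed byte lane
//	// LAO then performs *(*uint32)(unsafe.Pointer(word)) |= mask atomically.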

// func And8(ptr *uint8, val uint8)
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes so we
	// need to align ptr down to a word boundary and create a mask
	// containing val to AND with the entire word atomically.
	ORW	$~0xff, R4           // R4 = uint32(val) | 0xffffff00
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5 // R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3              // R3 = floor(ptr, 4) = ptr &^ 3
	RLL	R5, R4, R4           // R4 = rotl(R4, R5)
	LAN	R4, R6, 0(R3)        // R6 = *R3; *R3 &= R4; (atomic)
	RET
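
// Illustrative Go-level sketch (hypothetical) of the mask built above: every
// byte other than the addressed one is 0xff, so the atomic AND leaves those
// bytes unchanged:
//
//	shift := (3 - uintptr(unsafe.Pointer(ptr))&3) * 8  // as in Or8 above
//	mask := ^uint32(0xff) | uint32(val)                 // 0xffffffXX
//	mask = bits.RotateLeft32(mask, int(shift))          // move val into the addressed byte lane
//	// LAN then performs *word &= mask atomically.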

// func Or(ptr *uint32, val uint32)
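// Atomically:
//	*ptr |= val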
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAO	R4, R6, 0(R3)        // R6 = *R3; *R3 |= R4; (atomic)
	RET

// func And(ptr *uint32, val uint32)
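// Atomically:
//	*ptr &= val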
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAN	R4, R6, 0(R3)        // R6 = *R3; *R3 &= R4; (atomic)
	RET