// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// s390x assembly implementations of the runtime/internal/atomic
// primitives. The compare-and-swap instructions CS (32-bit) and
// CSG (64-bit) are inherently atomic and act as full memory barriers,
// so no explicit fence instructions are needed here.

#include "textflag.h"

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	CS	R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas_fail
	MOVB	$1, ret+16(FP)
	RET
cas_fail:
	MOVB	$0, ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	CSG	R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas64_fail
	MOVB	$1, ret+24(FP)
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)
	RET

// uintptr, uint, int64 and unsafe.Pointer are all 64 bits wide on
// s390x, so the following entry points simply tail-branch to the
// 64-bit implementations; the argument frames are layout-compatible.

// func Casuintptr(ptr *uintptr, old, new uintptr) bool
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Loaduintptr(ptr *uintptr) uintptr
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Loaduint(ptr *uint) uint
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Storeuintptr(ptr *uintptr, new uintptr)
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)

// func Loadint64(ptr *int64) int64
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Xaddint64(ptr *int64, delta int64) int64
TEXT ·Xaddint64(SB), NOSPLIT, $0-16
	BR	·Xadd64(SB)

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	MOVW	(R4), R3
repeat:
	ADD	R5, R3, R6
	CS	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat       // on failure CS reloaded R3 from memory; retry
	MOVW	R6, ret+16(FP)
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	MOVD	(R4), R3
repeat:
	ADD	R5, R3, R6
	CSG	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat       // on failure CSG reloaded R3 from memory; retry
	MOVD	R6, ret+16(FP)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R3
	MOVW	(R4), R6
repeat:
	CS	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP)
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
// Atomically:
//	old := *ptr
//	*ptr = new
//	return old
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R3
	MOVD	(R4), R6
repeat:
	CSG	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)

// func Or8(addr *uint8, v uint8)
// Atomically:
//	*addr |= v
// There is no byte-wide compare-and-swap, so the byte's value is
// shifted into its lane within the enclosing aligned 32-bit word and
// the OR is performed with a 32-bit CS loop.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Calculate shift.
	AND	$3, R3, R5
	XOR	$3, R5       // big endian - flip direction
	SLD	$3, R5       // MUL $8, R5
	SLD	R5, R4
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6
again:
	OR	R4, R6, R7
	CS	R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again
	RET

// func And8(addr *uint8, v uint8)
// Atomically:
//	*addr &= v
// As in Or8, the byte is widened to a 32-bit mask (all other byte
// lanes set to 0xff so they are preserved by the AND) and applied
// with a 32-bit CS loop on the enclosing aligned word.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Calculate shift.
	AND	$3, R3, R5
	XOR	$3, R5       // big endian - flip direction
	SLD	$3, R5       // MUL $8, R5
	OR	$-256, R4    // create 0xffffffffffffffxx
	RLLG	R5, R4       // rotate mask byte into the target lane
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6
again:
	AND	R4, R6, R7
	CS	R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again
	RET