// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// Implemented with a single COMPARE AND SWAP (CS); the condition code
// tells us whether the swap happened.
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	CS	R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas_fail      // condition code != equal => swap did not occur
	MOVB	$1, ret+16(FP)
	RET
cas_fail:
	MOVB	$0, ret+16(FP)
	RET

// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// 64-bit variant of Cas using COMPARE AND SWAP (CSG).
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	CSG	R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas64_fail
	MOVB	$1, ret+24(FP)
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)
	RET

// func Casuintptr(ptr *uintptr, old, new uintptr) bool
// uintptr is 64 bits on s390x, so this is just Cas64.
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func CasRel(ptr *uint32, old, new uint32) bool
// Release semantics come for free on s390x (strongly ordered memory
// model), so the plain Cas suffices.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	BR	·Cas(SB)

// func Loaduintptr(ptr *uintptr) uintptr
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Loaduint(ptr *uint) uint
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Storeuintptr(ptr *uintptr, new uintptr)
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)

// func Loadint64(ptr *int64) int64
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)

// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Xaddint64(ptr *int64, delta int64) int64
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)

// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// Pointers are 64 bits on s390x, so this is just Cas64.
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)

// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
// CS loads the current memory value into R3 on failure, so the loop
// retries with a fresh old value until the swap succeeds.
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	MOVW	(R4), R3
repeat:
	ADD	R5, R3, R6
	CS	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP) // return the new (post-add) value
	RET

// func Xadd64(ptr *uint64, delta int64) uint64
// 64-bit variant of Xadd using CSG in the retry loop.
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	MOVD	(R4), R3
repeat:
	ADD	R5, R3, R6
	CSG	R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchg(ptr *uint32, new uint32) uint32
// Atomically swaps *ptr with new and returns the old value.
// On CS failure R6 is reloaded with the current memory value, which is
// both the next compare value and, on success, the return value.
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R3
	MOVW	(R4), R6
repeat:
	CS	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVW	R6, ret+16(FP) // return the old value
	RET

// func Xchg64(ptr *uint64, new uint64) uint64
// 64-bit variant of Xchg using CSG.
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R3
	MOVD	(R4), R6
repeat:
	CSG	R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat
	MOVD	R6, ret+16(FP)
	RET

// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)

// func Or8(addr *uint8, v uint8)
// Atomically ORs v into the byte at addr. There is no byte-sized CS,
// so the byte's enclosing aligned 32-bit word is CAS-looped instead,
// with v shifted into the byte's position within that word.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Calculate shift: byte offset within the word, times 8.
	MOVD	R3, R5
	AND	$3, R5
	XOR	$3, R5 // big endian - flip direction
	SLD	$3, R5 // MUL $8, R5
	SLD	R5, R4 // move v into its byte lane; other lanes are 0 (OR identity)
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6
again:
	OR	R4, R6, R7
	CS	R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again
	RET

// func And8(addr *uint8, v uint8)
// Atomically ANDs v into the byte at addr. Same word-sized CAS loop as
// Or8, but the mask must be all-ones outside the target byte so the
// AND leaves the neighboring bytes unchanged.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// Calculate shift: byte offset within the word, times 8.
	MOVD	R3, R5
	AND	$3, R5
	XOR	$3, R5 // big endian - flip direction
	SLD	$3, R5 // MUL $8, R5
	OR	$-256, R4 // create 0xffffffffffffffxx
	RLLG	R5, R4    // rotate mask so v lands in the target byte lane
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	$-4, R3
	MOVWZ	0(R3), R6
again:
	AND	R4, R6, R7
	CS	R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
	BNE	again
	RET