// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build arm

package atomic

import (
	"internal/cpu"
	"unsafe"
)

// Export some functions via linkname to assembly in sync/atomic.
//go:linkname Xchg
//go:linkname Xchguintptr

// spinlock is a minimal busy-wait lock built from the 32-bit Cas and
// Store primitives in this package. It backs the per-address locks in
// locktab that serialize the software-emulated 64-bit operations below.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked
}

// lock spins until it wins the 0->1 CAS on l.v.
//go:nosplit
func (l *spinlock) lock() {
	for {
		if Cas(&l.v, 0, 1) {
			return
		}
	}
}

// unlock releases the lock with an atomic store of 0.
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}

// locktab is a fixed-size table of spinlocks. Each entry is padded out
// to a full cache line so distinct locks do not false-share.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}

// addrLock selects the lock guarding the uint64 at addr by hashing the
// address (the low 3 alignment bits are dropped) into locktab.
func addrLock(addr *uint64) *spinlock {
	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
}

// Atomic add and return new value.
// Implemented as a load + CAS retry loop over the 32-bit Cas primitive.
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if Cas(val, oval, nval) {
			return nval
		}
	}
}

// Xadduintptr has no Go body; it is implemented in assembly.
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

// Xchg atomically stores v into *addr and returns the previous value,
// using a CAS retry loop.
//go:nosplit
func Xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr
		if Cas(addr, old, v) {
			return old
		}
	}
}

// Xchguintptr is Xchg for uintptr values (uintptr is 32 bits on arm,
// so the value round-trips through uint32 without loss).
//go:nosplit
func Xchguintptr(addr *uintptr, v uintptr) uintptr {
	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}

// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

//go:noescape
func Store(addr *uint32, v uint32)

// StoreRel has no Go body; implemented in assembly.
//go:noescape
func StoreRel(addr *uint32, v uint32)

// StoreReluintptr has no Go body; implemented in assembly.
//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)

// goCas64 is a software (spinlock-serialized) compare-and-swap for
// uint64. It deliberately faults on a misaligned or nil addr before
// taking the lock, so the crash cannot happen while the lock is held.
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}

// goXadd64 is a software 64-bit atomic add; it returns the new value.
//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}

// goXchg64 is a software 64-bit atomic exchange; it returns the value
// previously stored at addr.
//go:nosplit
func goXchg64(addr *uint64, v uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	*addr = v
	addrLock(addr).unlock()
	return r
}

// goLoad64 is a software 64-bit atomic load.
//go:nosplit
func goLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	addrLock(addr).unlock()
	return r
}

// goStore64 is a software 64-bit atomic store.
//go:nosplit
func goStore64(addr *uint64, v uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	addrLock(addr).lock()
	*addr = v
	addrLock(addr).unlock()
}

// Or8 atomically ORs v into the byte at addr.
//go:nosplit
func Or8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	// Shifting v into its byte lane leaves the other three lanes zero,
	// so the OR touches only the target byte.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8) // little endian
	for {
		old := *addr32
		if Cas(addr32, old, old|word) {
			return
		}
	}
}

// And8 atomically ANDs v into the byte at addr. It widens to the
// containing aligned 32-bit word; the bits outside the target byte are
// forced to 1 (word |= ^mask) so the AND leaves them unchanged.
//go:nosplit
func And8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
	word |= ^mask
	for {
		old := *addr32
		if Cas(addr32, old, old&word) {
			return
		}
	}
}

// Or atomically ORs v into *addr via a CAS retry loop.
//go:nosplit
func Or(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old|v) {
			return
		}
	}
}

// And atomically ANDs v into *addr via a CAS retry loop.
//go:nosplit
func And(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old&v) {
			return
		}
	}
}

// The declarations below have no Go bodies; they are implemented in
// assembly.

//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

//go:noescape
func Load8(addr *uint8) uint8

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

//go:noescape
func Load64(addr *uint64) uint64

//go:noescape
func Store8(addr *uint8, v uint8)

//go:noescape
func Store64(addr *uint64, v uint64)