github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/internal/atomic/atomic_arm.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build arm

package atomic

import (
	"internal/cpu"
	"unsafe"
)

const (
	offsetARMHasV7Atomics = unsafe.Offsetof(cpu.ARM.HasV7Atomics)
)

// Export some functions via linkname to assembly in sync/atomic.
//
//go:linkname Xchg
//go:linkname Xchguintptr

type spinlock struct {
	v uint32
}

//go:nosplit
func (l *spinlock) lock() {
	for {
		if Cas(&l.v, 0, 1) {
			return
		}
	}
}

//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}

var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}

func addrLock(addr *uint64) *spinlock {
	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
}

// Atomic add and return new value.
//
//go:nosplit
func Xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if Cas(val, oval, nval) {
			return nval
		}
	}
}

//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr

//go:nosplit
func Xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr
		if Cas(addr, old, v) {
			return old
		}
	}
}

//go:nosplit
func Xchguintptr(addr *uintptr, v uintptr) uintptr {
	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}

// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

//go:noescape
func Store(addr *uint32, v uint32)

//go:noescape
func StoreRel(addr *uint32, v uint32)

//go:noescape
func StoreReluintptr(addr *uintptr, v uintptr)

//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}

//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}

//go:nosplit
func goXchg64(addr *uint64, v uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	*addr = v
	addrLock(addr).unlock()
	return r
}

//go:nosplit
func goLoad64(addr *uint64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr
	addrLock(addr).unlock()
	return r
}

//go:nosplit
func goStore64(addr *uint64, v uint64) {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	addrLock(addr).lock()
	*addr = v
	addrLock(addr).unlock()
}

//go:nosplit
func Or8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8) // little endian
	for {
		old := *addr32
		if Cas(addr32, old, old|word) {
			return
		}
	}
}

//go:nosplit
func And8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
	word |= ^mask
	for {
		old := *addr32
		if Cas(addr32, old, old&word) {
			return
		}
	}
}

//go:nosplit
func Or(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old|v) {
			return
		}
	}
}

//go:nosplit
func And(addr *uint32, v uint32) {
	for {
		old := *addr
		if Cas(addr, old, old&v) {
			return
		}
	}
}

//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

//go:noescape
func Load(addr *uint32) uint32

// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer

//go:noescape
func Load8(addr *uint8) uint8

//go:noescape
func LoadAcq(addr *uint32) uint32

//go:noescape
func LoadAcquintptr(ptr *uintptr) uintptr

//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

//go:noescape
func Load64(addr *uint64) uint64

//go:noescape
func Store8(addr *uint8, v uint8)

//go:noescape
func Store64(addr *uint64, v uint64)
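
The goCas64/goXadd64/goXchg64/goLoad64/goStore64 helpers above emulate 64-bit atomics on pre-ARMv7 hardware by serializing every 64-bit access through a small table of striped spinlocks keyed by the operand's address. The following is a minimal standalone sketch of that scheme, assuming sync/atomic stands in for the assembly Cas/Store primitives; the names lockFor and emulatedCas64 are illustrative only and not part of the runtime.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type spinlock struct{ v uint32 }

func (l *spinlock) lock() {
	// Spin until we win the 0 -> 1 transition.
	for !atomic.CompareAndSwapUint32(&l.v, 0, 1) {
	}
}

func (l *spinlock) unlock() { atomic.StoreUint32(&l.v, 0) }

// 57 striped locks, indexed by the uint64's address shifted by 3;
// a prime table size spreads nearby addresses across different locks.
var locktab [57]spinlock

func lockFor(addr *uint64) *spinlock {
	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))]
}

// emulatedCas64 mirrors goCas64: take the per-address lock, then perform
// the compare-and-swap non-atomically under it.
func emulatedCas64(addr *uint64, old, new uint64) bool {
	l := lockFor(addr)
	l.lock()
	defer l.unlock()
	if *addr == old {
		*addr = new
		return true
	}
	return false
}

func main() {
	var x uint64 = 1
	fmt.Println(emulatedCas64(&x, 1, 2), x) // true 2
	fmt.Println(emulatedCas64(&x, 1, 3), x) // false 2
}

Because every writer and reader of a given uint64 hashes to the same lock, the lock-protected sections are mutually exclusive even though the loads and stores inside them are ordinary 32-bit-at-a-time memory operations.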
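
Or8 and And8 implement byte-wide atomics by rounding the byte's address down to its containing 4-byte word and retrying a 32-bit CAS; And8 additionally ORs the inverted lane mask into the operand so the AND leaves the other three bytes untouched. Below is a hedged standalone sketch of the same little-endian lane arithmetic, using sync/atomic for the CAS; or8 here is a hypothetical re-implementation for illustration, not the runtime entry point, and it mirrors the runtime's uintptr-based pointer rounding.

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

func or8(addr *uint8, v uint8) {
	uaddr := uintptr(unsafe.Pointer(addr))
	// Round the byte's address down to its containing 4-byte word.
	word := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	// Shift v into the byte's lane within that word (little endian:
	// byte offset 0 is bits 0..7, offset 1 is bits 8..15, and so on).
	shift := (uaddr & 3) * 8
	mask := uint32(v) << shift
	for {
		old := atomic.LoadUint32(word)
		if atomic.CompareAndSwapUint32(word, old, old|mask) {
			return
		}
	}
}

func main() {
	// Place the bytes inside a uint32 so the containing word is
	// addressable and 4-byte aligned.
	var w uint32
	b := (*[4]uint8)(unsafe.Pointer(&w))
	or8(&b[2], 0x80)
	fmt.Printf("%08x\n", w) // 00800000 on little-endian machines
}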