github.com/primecitizens/pcz/std@v0.2.1/core/atomic/atomic_arm.go (about)

     1  // SPDX-License-Identifier: Apache-2.0
     2  // Copyright 2023 The Prime Citizens
     3  //
     4  // Copyright 2009 The Go Authors. All rights reserved.
     5  // Use of this source code is governed by a BSD-style
     6  // license that can be found in the LICENSE file.
     7  
     8  //go:build arm
     9  
    10  package atomic
    11  
    12  import (
    13  	"unsafe"
    14  
    15  	"github.com/primecitizens/pcz/std/core/cpu"
    16  )
    17  
// spinlock is a minimal test-and-set lock used to serialize the
// software (go*64) fallbacks for 64-bit atomics on arm.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked (see lock/unlock)
}
    21  
    22  //go:nosplit
    23  func (l *spinlock) lock() {
    24  	for {
    25  		if Cas32(&l.v, 0, 1) {
    26  			return
    27  		}
    28  	}
    29  }
    30  
    31  //go:nosplit
    32  func (l *spinlock) unlock() {
    33  	Store32(&l.v, 0)
    34  }
    35  
// locktab shards the spinlocks backing the go*64 fallbacks across 57
// entries keyed by address (see addrLock). Each entry is padded out to
// a cache line so two locks never share a line (avoids false sharing).
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
    40  
    41  func addrLock(addr *uint64) *spinlock {
    42  	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
    43  }
    44  
// armcas is the arm compare-and-swap primitive (body in per-arch
// assembly): it atomically sets *ptr to new iff *ptr == old and
// reports whether the swap happened.
//
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool
    47  
    48  //
    49  // Store
    50  //
    51  
// Store8 atomically stores v into *addr (body in assembly).
//
//go:noescape
func Store8(addr *uint8, v uint8)

// Store32 atomically stores v into *addr (body in assembly).
//
//go:noescape
func Store32(addr *uint32, v uint32)

// Store64 atomically stores v into *addr (body in assembly).
//
//go:noescape
func Store64(addr *uint64, v uint64)

// StorePointer atomically stores v into *addr (body in assembly).
// Not noescape -- it installs a pointer to addr.
func StorePointer(addr unsafe.Pointer, v unsafe.Pointer)
    63  
    64  //go:nosplit
    65  func goStore64(addr *uint64, v uint64) {
    66  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    67  		*(*int)(nil) = 0 // crash on unaligned uint64
    68  	}
    69  	_ = *addr // if nil, fault before taking the lock
    70  	addrLock(addr).lock()
    71  	*addr = v
    72  	addrLock(addr).unlock()
    73  }
    74  
    75  //
    76  // StoreRel
    77  //
    78  
// StoreRel32 atomically stores v into *addr; per its name this is the
// release-ordered variant (body in assembly).
//
//go:noescape
func StoreRel32(addr *uint32, v uint32)

// StoreRelUintptr atomically stores v into *addr; release-ordered
// variant (body in assembly).
//
//go:noescape
func StoreRelUintptr(addr *uintptr, v uintptr)
    84  
    85  //
    86  // Load
    87  //
    88  
// Load8 atomically loads and returns *addr (body in assembly).
//
//go:noescape
func Load8(addr *uint8) uint8

// Load32 atomically loads and returns *addr (body in assembly).
//
//go:noescape
func Load32(addr *uint32) uint32

// Load64 atomically loads and returns *addr (body in assembly).
//
//go:noescape
func Load64(addr *uint64) uint64

// LoadPointer atomically loads and returns *addr (body in assembly).
// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func LoadPointer(addr unsafe.Pointer) unsafe.Pointer
   100  
   101  //go:nosplit
   102  func goLoad64(addr *uint64) uint64 {
   103  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   104  		*(*int)(nil) = 0 // crash on unaligned uint64
   105  	}
   106  	_ = *addr // if nil, fault before taking the lock
   107  	var r uint64
   108  	addrLock(addr).lock()
   109  	r = *addr
   110  	addrLock(addr).unlock()
   111  	return r
   112  }
   113  
   114  //
   115  // LoadAcq
   116  //
   117  
// LoadAcq32 atomically loads and returns *addr; per its name this is
// the acquire-ordered variant (body in assembly).
//
//go:noescape
func LoadAcq32(addr *uint32) uint32

// LoadAcqUintptr atomically loads and returns *ptr; acquire-ordered
// variant (body in assembly).
//
//go:noescape
func LoadAcqUintptr(ptr *uintptr) uintptr
   123  
   124  //
   125  // bitwise
   126  //
   127  
   128  //go:nosplit
   129  func Or8(addr *uint8, v uint8) {
   130  	// Align down to 4 bytes and use 32-bit CAS.
   131  	uaddr := uintptr(unsafe.Pointer(addr))
   132  	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   133  	word := uint32(v) << ((uaddr & 3) * 8) // little endian
   134  	for {
   135  		old := *addr32
   136  		if Cas32(addr32, old, old|word) {
   137  			return
   138  		}
   139  	}
   140  }
   141  
   142  //go:nosplit
   143  func Or32(addr *uint32, v uint32) {
   144  	for {
   145  		old := *addr
   146  		if Cas32(addr, old, old|v) {
   147  			return
   148  		}
   149  	}
   150  }
   151  
   152  //go:nosplit
   153  func And8(addr *uint8, v uint8) {
   154  	// Align down to 4 bytes and use 32-bit CAS.
   155  	uaddr := uintptr(unsafe.Pointer(addr))
   156  	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   157  	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
   158  	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
   159  	word |= ^mask
   160  	for {
   161  		old := *addr32
   162  		if Cas32(addr32, old, old&word) {
   163  			return
   164  		}
   165  	}
   166  }
   167  
   168  //go:nosplit
   169  func And32(addr *uint32, v uint32) {
   170  	for {
   171  		old := *addr
   172  		if Cas32(addr, old, old&v) {
   173  			return
   174  		}
   175  	}
   176  }
   177  
   178  //
   179  // Swap
   180  //
   181  
   182  //go:nosplit
   183  func Swap32(addr *uint32, v uint32) uint32 {
   184  	for {
   185  		old := *addr
   186  		if Cas32(addr, old, v) {
   187  			return old
   188  		}
   189  	}
   190  }
   191  
// Swap64 atomically stores v into *addr and returns the previous
// value (body in assembly).
//
//go:noescape
func Swap64(addr *uint64, v uint64) uint64
   194  
   195  //go:nosplit
   196  func SwapUintptr(addr *uintptr, v uintptr) uintptr {
   197  	return uintptr(Swap32((*uint32)(unsafe.Pointer(addr)), uint32(v)))
   198  }
   199  
   200  //go:nosplit
   201  func goSwap64(addr *uint64, v uint64) uint64 {
   202  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   203  		*(*int)(nil) = 0 // crash on unaligned uint64
   204  	}
   205  	_ = *addr // if nil, fault before taking the lock
   206  	var r uint64
   207  	addrLock(addr).lock()
   208  	r = *addr
   209  	*addr = v
   210  	addrLock(addr).unlock()
   211  	return r
   212  }
   213  
   214  //
   215  // Add
   216  //
   217  
   218  // Atomic add and return new value.
   219  //
   220  //go:nosplit
   221  func Add32(val *uint32, delta int32) uint32 {
   222  	for {
   223  		oval := *val
   224  		nval := oval + uint32(delta)
   225  		if Cas32(val, oval, nval) {
   226  			return nval
   227  		}
   228  	}
   229  }
   230  
// Add64 atomically adds delta to *addr and returns the new value
// (body in assembly).
//
//go:noescape
func Add64(addr *uint64, delta int64) uint64

// AddUintptr atomically adds delta to *ptr and returns the new value
// (body in assembly).
//
//go:noescape
func AddUintptr(ptr *uintptr, delta uintptr) uintptr
   236  
   237  //go:nosplit
   238  func goAdd64(addr *uint64, delta int64) uint64 {
   239  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   240  		*(*int)(nil) = 0 // crash on unaligned uint64
   241  	}
   242  	_ = *addr // if nil, fault before taking the lock
   243  	var r uint64
   244  	addrLock(addr).lock()
   245  	r = *addr + uint64(delta)
   246  	*addr = r
   247  	addrLock(addr).unlock()
   248  	return r
   249  }
   250  
   251  //
   252  // Compare and swap
   253  //
   254  
// Cas64 atomically sets *addr to new iff *addr == old and reports
// whether the swap happened (body in assembly).
//
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool
   257  
   258  //go:nosplit
   259  func goCas64(addr *uint64, old, new uint64) bool {
   260  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   261  		*(*int)(nil) = 0 // crash on unaligned uint64
   262  	}
   263  	_ = *addr // if nil, fault before taking the lock
   264  	var ok bool
   265  	addrLock(addr).lock()
   266  	if *addr == old {
   267  		*addr = new
   268  		ok = true
   269  	}
   270  	addrLock(addr).unlock()
   271  	return ok
   272  }
   273  
   274  //
   275  // CasRel
   276  //
   277  
// CasRel32 atomically sets *addr to new iff *addr == old, reporting
// success; per its name this is the release-ordered variant (body in
// assembly).
//
//go:noescape
func CasRel32(addr *uint32, old, new uint32) bool