github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/internal/atomic/atomic_arm.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build arm
     6  
     7  package atomic
     8  
     9  import (
    10  	"internal/cpu"
    11  	"unsafe"
    12  )
    13  
// spinlock is a minimal test-and-set mutex built on 32-bit Cas.
// It is used below to emulate 64-bit atomic operations on ARM
// cores that lack native 64-bit atomics.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked (see lock/unlock)
}
    17  
    18  //go:nosplit
    19  func (l *spinlock) lock() {
    20  	for {
    21  		if Cas(&l.v, 0, 1) {
    22  			return
    23  		}
    24  	}
    25  }
    26  
// unlock releases l by atomically storing 0 (Store is the
// assembly-implemented atomic store declared below).
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
    31  
// locktab is a fixed pool of spinlocks that guard the emulated
// 64-bit atomics. Each entry is padded out to a cache line to
// avoid false sharing between locks; 57 (a prime-free of powers
// of two) spreads nearby addresses across different entries.
var locktab [57]struct {
	l   spinlock
	pad [cpu.CacheLinePadSize - unsafe.Sizeof(spinlock{})]byte
}
    36  
    37  func addrLock(addr *uint64) *spinlock {
    38  	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
    39  }
    40  
    41  // Atomic add and return new value.
    42  //go:nosplit
    43  func Xadd(val *uint32, delta int32) uint32 {
    44  	for {
    45  		oval := *val
    46  		nval := oval + uint32(delta)
    47  		if Cas(val, oval, nval) {
    48  			return nval
    49  		}
    50  	}
    51  }
    52  
// Xadduintptr atomically adds delta to *ptr and returns the new
// value. Implemented in assembly.
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
    55  
    56  //go:nosplit
    57  func Xchg(addr *uint32, v uint32) uint32 {
    58  	for {
    59  		old := *addr
    60  		if Cas(addr, old, v) {
    61  			return old
    62  		}
    63  	}
    64  }
    65  
    66  //go:nosplit
    67  func Xchguintptr(addr *uintptr, v uintptr) uintptr {
    68  	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
    69  }
    70  
// StorepNoWB performs *(*unsafe.Pointer)(addr) = v as an atomic
// pointer store with no write barrier. Implemented in assembly.
// Not noescape -- it installs a pointer to addr.
func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer)

// Store atomically stores v into *addr. Implemented in assembly.
//go:noescape
func Store(addr *uint32, v uint32)

// StoreRel is like Store but (per its name) presumably provides
// release ordering — see the assembly implementation to confirm.
//go:noescape
func StoreRel(addr *uint32, v uint32)
    79  
// goCas64 is the software fallback for 64-bit compare-and-swap on
// ARM cores without native 64-bit atomics: it takes the spinlock
// covering addr, and swaps in new only if *addr still equals old.
// Reports whether the swap happened. Note the deliberate ordering:
// the alignment check and nil dereference happen BEFORE taking the
// lock, so a bad address faults without leaving the lock held.
//go:nosplit
func goCas64(addr *uint64, old, new uint64) bool {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var ok bool
	addrLock(addr).lock()
	if *addr == old {
		*addr = new
		ok = true
	}
	addrLock(addr).unlock()
	return ok
}
    95  
// goXadd64 is the software fallback for 64-bit atomic add: under
// the spinlock covering addr it adds delta to *addr and returns
// the new value. As in goCas64, the alignment check and nil
// dereference run before the lock is taken so faults cannot
// leave the lock held.
//go:nosplit
func goXadd64(addr *uint64, delta int64) uint64 {
	if uintptr(unsafe.Pointer(addr))&7 != 0 {
		*(*int)(nil) = 0 // crash on unaligned uint64
	}
	_ = *addr // if nil, fault before taking the lock
	var r uint64
	addrLock(addr).lock()
	r = *addr + uint64(delta)
	*addr = r
	addrLock(addr).unlock()
	return r
}
   109  
   110  //go:nosplit
   111  func goXchg64(addr *uint64, v uint64) uint64 {
   112  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   113  		*(*int)(nil) = 0 // crash on unaligned uint64
   114  	}
   115  	_ = *addr // if nil, fault before taking the lock
   116  	var r uint64
   117  	addrLock(addr).lock()
   118  	r = *addr
   119  	*addr = v
   120  	addrLock(addr).unlock()
   121  	return r
   122  }
   123  
   124  //go:nosplit
   125  func goLoad64(addr *uint64) uint64 {
   126  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   127  		*(*int)(nil) = 0 // crash on unaligned uint64
   128  	}
   129  	_ = *addr // if nil, fault before taking the lock
   130  	var r uint64
   131  	addrLock(addr).lock()
   132  	r = *addr
   133  	addrLock(addr).unlock()
   134  	return r
   135  }
   136  
   137  //go:nosplit
   138  func goStore64(addr *uint64, v uint64) {
   139  	if uintptr(unsafe.Pointer(addr))&7 != 0 {
   140  		*(*int)(nil) = 0 // crash on unaligned uint64
   141  	}
   142  	_ = *addr // if nil, fault before taking the lock
   143  	addrLock(addr).lock()
   144  	*addr = v
   145  	addrLock(addr).unlock()
   146  }
   147  
   148  //go:nosplit
   149  func Or8(addr *uint8, v uint8) {
   150  	// Align down to 4 bytes and use 32-bit CAS.
   151  	uaddr := uintptr(unsafe.Pointer(addr))
   152  	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   153  	word := uint32(v) << ((uaddr & 3) * 8) // little endian
   154  	for {
   155  		old := *addr32
   156  		if Cas(addr32, old, old|word) {
   157  			return
   158  		}
   159  	}
   160  }
   161  
   162  //go:nosplit
   163  func And8(addr *uint8, v uint8) {
   164  	// Align down to 4 bytes and use 32-bit CAS.
   165  	uaddr := uintptr(unsafe.Pointer(addr))
   166  	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
   167  	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
   168  	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
   169  	word |= ^mask
   170  	for {
   171  		old := *addr32
   172  		if Cas(addr32, old, old&word) {
   173  			return
   174  		}
   175  	}
   176  }
   177  
// The declarations below have no Go bodies; they are implemented
// in assembly (or wired up by the linker). Per the pattern set by
// Xadd above, the Xadd64/Xchg64 variants presumably return the
// new/old value respectively — confirm against the asm.

// armcas is the raw compare-and-swap primitive for ARM.
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool

// Load atomically loads *addr.
//go:noescape
func Load(addr *uint32) uint32

// Loadp atomically loads the pointer stored at addr.
//go:noescape
func Loadp(addr unsafe.Pointer) unsafe.Pointer

// LoadAcq is Load with (per its name) acquire ordering.
//go:noescape
func LoadAcq(addr *uint32) uint32

// Cas64 is 64-bit compare-and-swap.
//go:noescape
func Cas64(addr *uint64, old, new uint64) bool

// CasRel is Cas with (per its name) release ordering.
//go:noescape
func CasRel(addr *uint32, old, new uint32) bool

// Xadd64 is 64-bit atomic add.
//go:noescape
func Xadd64(addr *uint64, delta int64) uint64

// Xchg64 is 64-bit atomic exchange.
//go:noescape
func Xchg64(addr *uint64, v uint64) uint64

// Load64 atomically loads the 64-bit word at addr.
//go:noescape
func Load64(addr *uint64) uint64

// Store64 atomically stores v into the 64-bit word at addr.
//go:noescape
func Store64(addr *uint64, v uint64)