github.com/afumu/libc@v0.0.6/musl/arch/x86_64/atomic_arch.h

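/* x86_64 atomic primitives for musl. This header is included by
 * src/internal/atomic.h, which pulls in <stdint.h> (the source of
 * uint64_t here) and builds the remaining atomic operations generically
 * on top of these primitives. Each self-referential "#define a_foo
 * a_foo" marks the primitive as provided, so atomic.h's
 * #ifndef-guarded generic fallback is skipped.
 */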
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	__asm__ __volatile__ (
		"lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
	return t;
}
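/* cmpxchg compares *p with the accumulator (t, via the "a" constraint);
 * on a match it stores s into *p, otherwise it loads *p into the
 * accumulator. a_cas therefore always returns the value *p held before
 * the operation, and success is detected as a_cas(p, t, s) == t. A
 * hypothetical caller-side retry loop (not part of this header):
 *
 *	int old;
 *	do old = *p;
 *	while (a_cas(p, old, old+1) != old);
 */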
#define a_cas_p a_cas_p
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	__asm__( "lock ; cmpxchg %3, %1"
		: "=a"(t), "=m"(*(void *volatile *)p)
		: "a"(t), "r"(s) : "memory" );
	return t;
}
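/* a_cas_p is the pointer-width variant, usable e.g. for a hypothetical
 * Treiber-style lock-free push (illustrative sketch only):
 *
 *	struct node { struct node *next; };
 *	static void push(void *volatile *head, struct node *n)
 *	{
 *		void *old;
 *		do n->next = old = *head;
 *		while (a_cas_p(head, old, n) != old);
 *	}
 */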
#define a_swap a_swap
static inline int a_swap(volatile int *p, int v)
{
	__asm__ __volatile__(
		"xchg %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}
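/* xchg with a memory operand is implicitly locked on x86, so no lock
 * prefix is needed. A hypothetical test-and-set spinlock acquire built
 * on it (illustrative only):
 *
 *	while (a_swap(&lk, 1)) a_spin();
 */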
#define a_fetch_add a_fetch_add
static inline int a_fetch_add(volatile int *p, int v)
{
	__asm__ __volatile__(
		"lock ; xadd %0, %1"
		: "=r"(v), "=m"(*p) : "0"(v) : "memory" );
	return v;
}
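/* lock xadd atomically adds v to *p and leaves the old value in the
 * register, which is returned. E.g. a hypothetical reference-count
 * drop (illustrative only):
 *
 *	if (a_fetch_add(&refcnt, -1) == 1) destroy(obj);
 */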
#define a_and a_and
static inline void a_and(volatile int *p, int v)
{
	__asm__ __volatile__(
		"lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or a_or
static inline void a_or(volatile int *p, int v)
{
	__asm__ __volatile__(
		"lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
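/* a_and and a_or apply a mask to *p atomically but return nothing;
 * callers that need the old value build a loop on a_cas instead. E.g.
 * setting and clearing a hypothetical flag bit (illustrative only):
 *
 *	a_or(&flags, FLAG_WAITERS);
 *	a_and(&flags, ~FLAG_WAITERS);
 */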
#define a_and_64 a_and_64
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	__asm__ __volatile__(
		"lock ; and %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

#define a_or_64 a_or_64
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	__asm__ __volatile__(
		"lock ; or %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}
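/* 64-bit variants of the mask operations above. On x86_64 the same
 * lock-prefixed instructions cover the full operand width; the
 * compiler selects the 64-bit encoding from the uint64_t operands.
 * musl uses these for 64-bit bitmasks such as malloc's bin map.
 */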
#define a_inc a_inc
static inline void a_inc(volatile int *p)
{
	__asm__ __volatile__(
		"lock ; incl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}

#define a_dec a_dec
static inline void a_dec(volatile int *p)
{
	__asm__ __volatile__(
		"lock ; decl %0"
		: "=m"(*p) : "m"(*p) : "memory" );
}
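/* incl/decl modify memory in place, so the value never passes through
 * a register. The "m"(*p) input marks the old contents as consumed
 * (effectively "+m"). Neither routine reports the result; callers that
 * need it (e.g. to detect a count reaching zero) use a_fetch_add.
 */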
#define a_store a_store
static inline void a_store(volatile int *p, int x)
{
	__asm__ __volatile__(
		"mov %1, %0 ; lock ; orl $0,(%%rsp)"
		: "=m"(*p) : "r"(x) : "memory" );
}
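/* Under the x86 TSO memory model a plain mov already has release
 * ordering, but a_store must additionally prevent the store from being
 * reordered past subsequent loads (StoreLoad). The lock-prefixed no-op
 * on (%rsp) provides that full fence and is cheaper than mfence on
 * most implementations.
 */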
#define a_barrier a_barrier
static inline void a_barrier()
{
	__asm__ __volatile__( "" : : : "memory" );
}

#define a_spin a_spin
static inline void a_spin()
{
	__asm__ __volatile__( "pause" : : : "memory" );
}

#define a_crash a_crash
static inline void a_crash()
{
	__asm__ __volatile__( "hlt" : : : "memory" );
}
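/* For ordinary loads and stores, TSO permits only StoreLoad reordering,
 * which a_store already fences, so a_barrier only has to stop compiler
 * reordering. pause hints that the code is in a spin-wait loop, saving
 * power and easing contention. hlt is privileged, so executing it at
 * user level raises #GP and terminates the process deterministically.
 */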
#define a_ctz_64 a_ctz_64
static inline int a_ctz_64(uint64_t x)
{
	__asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
	return x;
}

#define a_clz_64 a_clz_64
static inline int a_clz_64(uint64_t x)
{
	__asm__( "bsr %1,%0 ; xor $63,%0" : "=r"(x) : "r"(x) );
	return x;
}
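/* bsf yields the index of the lowest set bit (a count of trailing
 * zeros); bsr yields the index of the highest. Since that index lies
 * in 0..63, 63 - bsr(x) equals 63 ^ bsr(x), which the xor computes,
 * turning it into a count of leading zeros. Both instructions leave
 * the result undefined for x == 0, so callers must pass x != 0. E.g.
 * a_ctz_64(0x28) == 3 and a_clz_64(0x28) == 58.
 */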