github.com/256dpi/max-go@v0.7.0/lib/max/ext_atomic.h

// ext_atomic.h copyright 2008 cycling '74

#ifndef __EXT_ATOMIC_H__
#define __EXT_ATOMIC_H__

#if __GNUC__ && !defined(MAC_VERSION)
#define USE_GNUC_ATOMICS
#endif

#ifdef USE_GNUC_ATOMICS

#include <stdint.h>

typedef volatile int32_t t_int32_atomic;
typedef volatile int64_t t_int64_atomic;
typedef volatile uint32_t t_uint32_atomic;
typedef volatile uint64_t t_uint64_atomic;

/** increment an atomic int value
    @ingroup threading
    return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_INCREMENT(atomicptr) (__atomic_add_fetch(atomicptr, 1, __ATOMIC_RELAXED))

/** increment an atomic int value with a memory barrier
    @ingroup threading
    return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_INCREMENT_BARRIER(atomicptr) (__atomic_add_fetch(atomicptr, 1, __ATOMIC_SEQ_CST))

/** decrement an atomic int value
    @ingroup threading
    return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_DECREMENT(atomicptr) (__atomic_sub_fetch(atomicptr, 1, __ATOMIC_RELAXED))

/** decrement an atomic int value with a memory barrier
    @ingroup threading
    return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_DECREMENT_BARRIER(atomicptr) (__atomic_sub_fetch(atomicptr, 1, __ATOMIC_SEQ_CST))

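/*  Illustrative sketch (editor's addition, not part of the original SDK header):
    a common use of the increment/decrement macros above is a reference count.
    The t_refcounted type and refcounted_free() are hypothetical names invented
    for this example only.

    typedef struct _refcounted {
        t_int32_atomic refcount;            // starts at 1 when the object is created
    } t_refcounted;

    static void refcounted_retain(t_refcounted *x)
    {
        ATOMIC_INCREMENT(&x->refcount);     // taking a reference needs no extra ordering
    }

    static void refcounted_release(t_refcounted *x)
    {
        // the macros return the *new* value, so 0 means the last reference was dropped;
        // the barrier variant keeps earlier writes ordered before the object is freed
        if (ATOMIC_DECREMENT_BARRIER(&x->refcount) == 0)
            refcounted_free(x);             // hypothetical destructor
    }
*/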

// atomically replace *ptr with new_ if it still equals old; returns true when the swap was performed
static
#ifdef __cplusplus
bool
#else
_Bool
#endif
atomic_compare_and_swap_32(t_int32_atomic *ptr, t_int32_atomic old, t_int32_atomic new_)
{
	return __atomic_compare_exchange_n(ptr, (int32_t *)&old, new_, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

// 64-bit variant of the helper above
static
#ifdef __cplusplus
bool
#else
_Bool
#endif
atomic_compare_and_swap_64(t_int64_atomic *ptr, t_int64_atomic old, t_int64_atomic new_)
{
	return __atomic_compare_exchange_n(ptr, (int64_t *)&old, new_, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

#define ATOMIC_COMPARE_SWAP32(oldvalue, newvalue, atomicptr) atomic_compare_and_swap_32(atomicptr, oldvalue, newvalue)
#define ATOMIC_COMPARE_SWAP64(oldvalue, newvalue, atomicptr) atomic_compare_and_swap_64(atomicptr, oldvalue, newvalue)
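
/*  Illustrative sketch (editor's addition, not part of the original SDK header):
    the usual retry loop built on the compare-and-swap macros. Note the argument
    order is (oldvalue, newvalue, atomicptr), and the macros return true only when
    the swap was actually performed.

    static void counter_add(t_int32_atomic *counter, int32_t amount)
    {
        int32_t oldval, newval;
        do {
            oldval = *counter;          // snapshot the current value
            newval = oldval + amount;   // compute the desired value
        } while (!ATOMIC_COMPARE_SWAP32(oldval, newval, counter));
        // the loop retries whenever another thread modified *counter in between
    }
*/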


#else // USE_GNUC_ATOMICS

#ifdef MAC_VERSION

#include <stdint.h>
#include <libkern/OSAtomic.h>

typedef volatile int32_t t_int32_atomic;
typedef volatile int64_t t_int64_atomic;
typedef volatile u_int32_t t_uint32_atomic;
typedef volatile u_int64_t t_uint64_atomic;

/** increment an atomic int value
	@ingroup threading
	return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_INCREMENT(atomicptr) OSAtomicIncrement32((int32_t *)atomicptr)

/** increment an atomic int value with a memory barrier
	@ingroup threading
	return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_INCREMENT_BARRIER(atomicptr) OSAtomicIncrement32Barrier((int32_t *)atomicptr)

/** decrement an atomic int value
	@ingroup threading
	return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_DECREMENT(atomicptr) OSAtomicDecrement32((int32_t *)atomicptr)

/** decrement an atomic int value with a memory barrier
	@ingroup threading
	return value of ATOMIC_INCREMENT and ATOMIC_DECREMENT is the *new* value after performing the operation
*/
#define ATOMIC_DECREMENT_BARRIER(atomicptr) OSAtomicDecrement32Barrier((int32_t *)atomicptr)

#define ATOMIC_COMPARE_SWAP32(oldvalue, newvalue, atomicptr) (OSAtomicCompareAndSwap32Barrier(oldvalue, newvalue, atomicptr))
#define ATOMIC_COMPARE_SWAP64(oldvalue, newvalue, atomicptr) (OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue, atomicptr))

#else // WIN_VERSION

// rbs: intrin.h is not compatible with C, only C++
// #include <intrin.h>
typedef volatile long t_int32_atomic;
typedef volatile __int64 t_int64_atomic;
typedef volatile unsigned long t_uint32_atomic;
typedef volatile unsigned __int64 t_uint64_atomic;

BEGIN_USING_C_LINKAGE

LONG  __cdecl _InterlockedIncrement(LONG volatile *Addend);
LONG  __cdecl _InterlockedDecrement(LONG volatile *Addend);
LONG  __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
__int64  __cdecl _InterlockedCompareExchange64(__int64 volatile *Dest, __int64 Exchange, __int64 Comp);
//   LONG  __cdecl _InterlockedExchange(LPLONG volatile Target, LONG Value);
//   LONG  __cdecl _InterlockedExchangeAdd(LPLONG volatile Addend, LONG Value);

END_USING_C_LINKAGE

#pragma intrinsic (_InterlockedIncrement)
#pragma intrinsic (_InterlockedDecrement)
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedCompareExchange64)

/**	Use this routine for incrementing a global counter using a thread-safe and multiprocessor-safe method.
	@ingroup			threading
	@param	atomicptr	pointer to the (int) counter.
*/

// on Windows I don't think there are non-barrier atomic increment / decrement functions;
// perhaps this could be done with inline assembly?

#define ATOMIC_INCREMENT(atomicptr)			  (_InterlockedIncrement(atomicptr))
#define ATOMIC_INCREMENT_BARRIER(atomicptr)   (_InterlockedIncrement(atomicptr))


/**	Use this routine for decrementing a global counter using a thread-safe and multiprocessor-safe method.
	@ingroup	threading
	@param	atomicptr	pointer to the (int) counter.
*/
#define ATOMIC_DECREMENT(atomicptr)			  (_InterlockedDecrement(atomicptr))
#define ATOMIC_DECREMENT_BARRIER(atomicptr)   (_InterlockedDecrement(atomicptr))

/** atomic compare exchange does this:
	- if (*atomicptr == oldvalue) *atomicptr = newvalue;
	- all of the above is done atomically
	- the return value is a boolean: true if the exchange was done
	@ingroup	threading
	@param	atomicptr		pointer to the atomic value
	@param	newvalue		value that will be assigned to *atomicptr if the test succeeds
	@param	oldvalue		newvalue is only stored if the original value equals oldvalue
*/
#define ATOMIC_COMPARE_SWAP32(oldvalue, newvalue, atomicptr) (_InterlockedCompareExchange(atomicptr, newvalue, oldvalue)==oldvalue)
#define ATOMIC_COMPARE_SWAP64(oldvalue, newvalue, atomicptr) (_InterlockedCompareExchange64(atomicptr, newvalue, oldvalue)==oldvalue)

#endif // WIN_VERSION

#endif // USE_GNUC_ATOMICS
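
/*  Illustrative sketch (editor's addition, not part of the original SDK header):
    in every branch above ATOMIC_COMPARE_SWAP32 carries barrier semantics
    (__ATOMIC_SEQ_CST, the OSAtomic *Barrier variants, or the Interlocked
    intrinsics), so a minimal test-and-set spinlock can be sketched on top of it.
    The t_spinlock name is hypothetical and used only for this example.

    typedef t_int32_atomic t_spinlock;      // 0 = unlocked, 1 = locked

    static void spinlock_acquire(t_spinlock *lock)
    {
        // retry until this thread is the one that flips 0 -> 1
        while (!ATOMIC_COMPARE_SWAP32(0, 1, lock))
            ;   // spin; a real implementation would yield or pause here
    }

    static void spinlock_release(t_spinlock *lock)
    {
        // flip 1 -> 0; this swap always succeeds for the lock holder
        ATOMIC_COMPARE_SWAP32(1, 0, lock);
    }
*/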

#endif // #ifndef __EXT_ATOMIC_H__