github.com/tinygo-org/tinygo@v0.31.3-0.20240404173401-90b0bf646c27/src/runtime/atomics_critical.go

//go:build baremetal && !tinygo.wasm

// Automatically generated file. DO NOT EDIT.
// This file implements stand-ins for non-native atomics using critical sections.

package runtime

import (
	"runtime/interrupt"
	_ "unsafe"
)

// Documentation:
// * https://llvm.org/docs/Atomics.html
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
//
// Some atomic operations are emitted inline while others are emitted as libcalls.
// How many are emitted as libcalls depends on the MCU arch and core variant.
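//
// As an illustration (hedged example, not part of the generated code): on a
// single-core Cortex-M0 target, which lacks native atomic read-modify-write
// instructions, a Go call such as
//
//	import "sync/atomic"
//
//	var counter uint32
//	atomic.AddUint32(&counter, 1)
//
// is typically lowered by LLVM to one of the libcalls below (for example
// __atomic_fetch_add_4), which this file implements by briefly disabling
// interrupts around a plain read-modify-write sequence.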

// 16-bit atomics.

//export __atomic_load_2
func __atomic_load_2(ptr *uint16, ordering uintptr) uint16 {
	// The LLVM docs for this say that there is a val argument after the pointer.
	// That is a typo, and the GCC docs omit it.
	mask := interrupt.Disable()
	val := *ptr
	interrupt.Restore(mask)
	return val
}

//export __atomic_store_2
func __atomic_store_2(ptr *uint16, val uint16, ordering uintptr) {
	mask := interrupt.Disable()
	*ptr = val
	interrupt.Restore(mask)
}

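// doAtomicCAS16 performs a compare-and-swap on *ptr inside a critical section:
// if *ptr equals expected it is set to desired. The value that was previously
// in *ptr is returned either way.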
//go:inline
func doAtomicCAS16(ptr *uint16, expected, desired uint16) uint16 {
	mask := interrupt.Disable()
	old := *ptr
	if old == expected {
		*ptr = desired
	}
	interrupt.Restore(mask)
	return old
}

//export __sync_val_compare_and_swap_2
func __sync_val_compare_and_swap_2(ptr *uint16, expected, desired uint16) uint16 {
	return doAtomicCAS16(ptr, expected, desired)
}

//export __atomic_compare_exchange_2
func __atomic_compare_exchange_2(ptr, expected *uint16, desired uint16, successOrder, failureOrder uintptr) bool {
	exp := *expected
	old := doAtomicCAS16(ptr, exp, desired)
	return old == exp
}

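// doAtomicSwap16 atomically replaces *ptr with new and returns the previous
// value, using a critical section for atomicity.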
//go:inline
func doAtomicSwap16(ptr *uint16, new uint16) uint16 {
	mask := interrupt.Disable()
	old := *ptr
	*ptr = new
	interrupt.Restore(mask)
	return old
}

//export __sync_lock_test_and_set_2
func __sync_lock_test_and_set_2(ptr *uint16, new uint16) uint16 {
	return doAtomicSwap16(ptr, new)
}

//export __atomic_exchange_2
func __atomic_exchange_2(ptr *uint16, new uint16, ordering uintptr) uint16 {
	return doAtomicSwap16(ptr, new)
}

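// doAtomicAdd16 adds value to *ptr inside a critical section and returns both
// the previous and the updated value.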
//go:inline
func doAtomicAdd16(ptr *uint16, value uint16) (old, new uint16) {
	mask := interrupt.Disable()
	old = *ptr
	new = old + value
	*ptr = new
	interrupt.Restore(mask)
	return old, new
}

//export __atomic_fetch_add_2
func __atomic_fetch_add_2(ptr *uint16, value uint16, ordering uintptr) uint16 {
	old, _ := doAtomicAdd16(ptr, value)
	return old
}

//export __sync_fetch_and_add_2
func __sync_fetch_and_add_2(ptr *uint16, value uint16) uint16 {
	old, _ := doAtomicAdd16(ptr, value)
	return old
}

//export __atomic_add_fetch_2
func __atomic_add_fetch_2(ptr *uint16, value uint16, ordering uintptr) uint16 {
	_, new := doAtomicAdd16(ptr, value)
	return new
}

// 32-bit atomics.

//export __atomic_load_4
func __atomic_load_4(ptr *uint32, ordering uintptr) uint32 {
	// The LLVM docs for this say that there is a val argument after the pointer.
	// That is a typo, and the GCC docs omit it.
	mask := interrupt.Disable()
	val := *ptr
	interrupt.Restore(mask)
	return val
}

//export __atomic_store_4
func __atomic_store_4(ptr *uint32, val uint32, ordering uintptr) {
	mask := interrupt.Disable()
	*ptr = val
	interrupt.Restore(mask)
}

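// doAtomicCAS32 is the 32-bit equivalent of doAtomicCAS16: compare-and-swap
// inside a critical section, returning the previous value of *ptr.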
//go:inline
func doAtomicCAS32(ptr *uint32, expected, desired uint32) uint32 {
	mask := interrupt.Disable()
	old := *ptr
	if old == expected {
		*ptr = desired
	}
	interrupt.Restore(mask)
	return old
}

//export __sync_val_compare_and_swap_4
func __sync_val_compare_and_swap_4(ptr *uint32, expected, desired uint32) uint32 {
	return doAtomicCAS32(ptr, expected, desired)
}

//export __atomic_compare_exchange_4
func __atomic_compare_exchange_4(ptr, expected *uint32, desired uint32, successOrder, failureOrder uintptr) bool {
	exp := *expected
	old := doAtomicCAS32(ptr, exp, desired)
	return old == exp
}

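// doAtomicSwap32 is the 32-bit equivalent of doAtomicSwap16: exchange *ptr
// with new inside a critical section and return the previous value.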
//go:inline
func doAtomicSwap32(ptr *uint32, new uint32) uint32 {
	mask := interrupt.Disable()
	old := *ptr
	*ptr = new
	interrupt.Restore(mask)
	return old
}

//export __sync_lock_test_and_set_4
func __sync_lock_test_and_set_4(ptr *uint32, new uint32) uint32 {
	return doAtomicSwap32(ptr, new)
}

//export __atomic_exchange_4
func __atomic_exchange_4(ptr *uint32, new uint32, ordering uintptr) uint32 {
	return doAtomicSwap32(ptr, new)
}

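// doAtomicAdd32 is the 32-bit equivalent of doAtomicAdd16: add value to *ptr
// inside a critical section and return both the old and the new value.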
//go:inline
func doAtomicAdd32(ptr *uint32, value uint32) (old, new uint32) {
	mask := interrupt.Disable()
	old = *ptr
	new = old + value
	*ptr = new
	interrupt.Restore(mask)
	return old, new
}

//export __atomic_fetch_add_4
func __atomic_fetch_add_4(ptr *uint32, value uint32, ordering uintptr) uint32 {
	old, _ := doAtomicAdd32(ptr, value)
	return old
}

//export __sync_fetch_and_add_4
func __sync_fetch_and_add_4(ptr *uint32, value uint32) uint32 {
	old, _ := doAtomicAdd32(ptr, value)
	return old
}

//export __atomic_add_fetch_4
func __atomic_add_fetch_4(ptr *uint32, value uint32, ordering uintptr) uint32 {
	_, new := doAtomicAdd32(ptr, value)
	return new
}

// 64-bit atomics.

//export __atomic_load_8
func __atomic_load_8(ptr *uint64, ordering uintptr) uint64 {
	// The LLVM docs for this say that there is a val argument after the pointer.
	// That is a typo, and the GCC docs omit it.
	mask := interrupt.Disable()
	val := *ptr
	interrupt.Restore(mask)
	return val
}

//export __atomic_store_8
func __atomic_store_8(ptr *uint64, val uint64, ordering uintptr) {
	mask := interrupt.Disable()
	*ptr = val
	interrupt.Restore(mask)
}

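// doAtomicCAS64 is the 64-bit equivalent of doAtomicCAS16: compare-and-swap
// inside a critical section, returning the previous value of *ptr.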
//go:inline
func doAtomicCAS64(ptr *uint64, expected, desired uint64) uint64 {
	mask := interrupt.Disable()
	old := *ptr
	if old == expected {
		*ptr = desired
	}
	interrupt.Restore(mask)
	return old
}

//export __sync_val_compare_and_swap_8
func __sync_val_compare_and_swap_8(ptr *uint64, expected, desired uint64) uint64 {
	return doAtomicCAS64(ptr, expected, desired)
}

//export __atomic_compare_exchange_8
func __atomic_compare_exchange_8(ptr, expected *uint64, desired uint64, successOrder, failureOrder uintptr) bool {
	exp := *expected
	old := doAtomicCAS64(ptr, exp, desired)
	return old == exp
}

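// doAtomicSwap64 is the 64-bit equivalent of doAtomicSwap16: exchange *ptr
// with new inside a critical section and return the previous value.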
//go:inline
func doAtomicSwap64(ptr *uint64, new uint64) uint64 {
	mask := interrupt.Disable()
	old := *ptr
	*ptr = new
	interrupt.Restore(mask)
	return old
}

//export __sync_lock_test_and_set_8
func __sync_lock_test_and_set_8(ptr *uint64, new uint64) uint64 {
	return doAtomicSwap64(ptr, new)
}

//export __atomic_exchange_8
func __atomic_exchange_8(ptr *uint64, new uint64, ordering uintptr) uint64 {
	return doAtomicSwap64(ptr, new)
}

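// doAtomicAdd64 is the 64-bit equivalent of doAtomicAdd16: add value to *ptr
// inside a critical section and return both the old and the new value.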
//go:inline
func doAtomicAdd64(ptr *uint64, value uint64) (old, new uint64) {
	mask := interrupt.Disable()
	old = *ptr
	new = old + value
	*ptr = new
	interrupt.Restore(mask)
	return old, new
}

//export __atomic_fetch_add_8
func __atomic_fetch_add_8(ptr *uint64, value uint64, ordering uintptr) uint64 {
	old, _ := doAtomicAdd64(ptr, value)
	return old
}

//export __sync_fetch_and_add_8
func __sync_fetch_and_add_8(ptr *uint64, value uint64) uint64 {
	old, _ := doAtomicAdd64(ptr, value)
	return old
}

//export __atomic_add_fetch_8
func __atomic_add_fetch_8(ptr *uint64, value uint64, ordering uintptr) uint64 {
	_, new := doAtomicAdd64(ptr, value)
	return new
}