author     Mitya Selivanov <automainint@guattari.tech>  2023-12-29 06:21:33 +0100
committer  Mitya Selivanov <automainint@guattari.tech>  2023-12-29 06:21:33 +0100
commit     2d6c8fec45b23a8a28668ecf3ef281139ab778a7 (patch)
tree       75d2a8538992129a83c0c2b83688289443d697e5 /source/kit/atomic.win32.c
parent     820b171245f2f14766f3accdb0246a4e2c0d596a (diff)
download   saw-2d6c8fec45b23a8a28668ecf3ef281139ab778a7.zip
refactor dependencies; include dependencies' source code
Diffstat (limited to 'source/kit/atomic.win32.c')
-rw-r--r--  source/kit/atomic.win32.c  234
1 file changed, 234 insertions, 0 deletions
diff --git a/source/kit/atomic.win32.c b/source/kit/atomic.win32.c
new file mode 100644
index 0000000..791f8fe
--- /dev/null
+++ b/source/kit/atomic.win32.c
@@ -0,0 +1,234 @@
+#include "atomic.h"
+
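+/*
+ *  Win32 fallback for C11-style atomics on MSVC, which in C mode
+ *  historically lacks <stdatomic.h>. Each helper maps a fixed-width
+ *  atomic operation onto the matching Interlocked* intrinsic. The
+ *  kit_atomic_* entry points are presumably dispatched to by macros
+ *  in atomic.h; that mapping is assumed here, not shown.
+ */
+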
+#ifdef _MSC_VER
+# include <assert.h>
+
+static_assert(sizeof(char) == 1, "Wrong char size");
+static_assert(sizeof(short) == 2, "Wrong short size");
+static_assert(sizeof(int) == 4, "Wrong int size");
+static_assert(sizeof(long) == 4, "Wrong long size");
+
+# include <intrin.h>
+
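+/*
+ *  Stores: a relaxed store is a plain volatile write; any stronger
+ *  ordering falls through to _InterlockedExchange*, which acts as a
+ *  full memory barrier, so it satisfies release and seq_cst (at the
+ *  cost of being stronger than strictly required for release).
+ */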
+void kit_atomic_store_explicit_8(u8 volatile *var, u8 value,
+ i32 memory_order) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange8(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_16(u16 volatile *var, u16 value,
+ i32 memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange16(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_32(u32 volatile *var, u32 value,
+ i32 memory_order) {
+  long volatile *dst = (long volatile *) var;
+  long           src = (long) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_64(u64 volatile *var, u64 value,
+ i32 memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default:
+# ifdef _WIN64
+ _InterlockedExchange64(dst, src);
+# else
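+      /*
+       *  64-bit atomics are not implemented for 32-bit targets:
+       *  trap in debug builds, then fall back to a non-atomic,
+       *  truncating 32-bit operation. The same caveat applies to
+       *  the other 64-bit helpers below. A possible fix (not done
+       *  here) is emulation via _InterlockedCompareExchange64,
+       *  which 32-bit x86 provides through cmpxchg8b.
+       */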
+ assert(0);
+      _InterlockedExchange((long volatile *) dst, (long) src);
+# endif
+ }
+}
+
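+/*
+ *  Loads: a relaxed load is a plain volatile read; otherwise
+ *  _InterlockedOr* with 0 serves as a read-modify-write no-op that
+ *  reads the value atomically with full-barrier semantics.
+ */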
+u8 kit_atomic_load_explicit_8(u8 volatile *var, i32 memory_order) {
+ char volatile *dst = (char volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (u8) *dst;
+
+ return (u8) _InterlockedOr8(dst, 0);
+}
+
+u16 kit_atomic_load_explicit_16(u16 volatile *var, i32 memory_order) {
+ short volatile *dst = (short volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (u16) *dst;
+
+ return (u16) _InterlockedOr16(dst, 0);
+}
+
+u32 kit_atomic_load_explicit_32(u32 volatile *var, i32 memory_order) {
+  long volatile *dst = (long volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (u32) *dst;
+
+ return (u32) _InterlockedOr(dst, 0);
+}
+
+u64 kit_atomic_load_explicit_64(u64 volatile *var, i32 memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (u64) *dst;
+
+# ifdef _WIN64
+ return (u64) _InterlockedOr64(dst, 0);
+# else
+ assert(0);
+  return (u64) _InterlockedOr((long volatile *) dst, 0);
+# endif
+}
+
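+/*
+ *  Exchanges: memory_order is deliberately ignored, since the
+ *  Interlocked exchange intrinsics always act as full barriers.
+ */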
+u8 kit_atomic_exchange_explicit_8(u8 volatile *var, u8 value,
+ i32 memory_order) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+
+ return (u8) _InterlockedExchange8(dst, src);
+}
+
+u16 kit_atomic_exchange_explicit_16(u16 volatile *var, u16 value,
+ i32 memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ return (u16) _InterlockedExchange16(dst, src);
+}
+
+u32 kit_atomic_exchange_explicit_32(u32 volatile *var, u32 value,
+ i32 memory_order) {
+  long volatile *dst = (long volatile *) var;
+  long           src = (long) value;
+
+ return (u32) _InterlockedExchange(dst, src);
+}
+
+u64 kit_atomic_exchange_explicit_64(u64 volatile *var, u64 value,
+ i32 memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+# ifdef _WIN64
+ return (u64) _InterlockedExchange64(dst, src);
+# else
+ assert(0);
+  return (u64) _InterlockedExchange((long volatile *) dst, (long) src);
+# endif
+}
+
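+/*
+ *  Compare-exchange: _InterlockedCompareExchange* returns the value
+ *  previously stored at the destination. Writing it back into
+ *  *expected and comparing against the original snapshot mirrors the
+ *  C11 atomic_compare_exchange_strong_explicit contract: on failure
+ *  the caller sees the current value in *expected.
+ */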
+int kit_atomic_compare_exchange_explicit_8(u8 volatile *var,
+ u8 *expected, u8 value,
+ i32 memory_order_succ_,
+ i32 memory_order_fail_) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+ char exp = (char) *expected;
+
+ *expected = (u8) _InterlockedCompareExchange8(dst, src, exp);
+
+ return exp == (char) *expected;
+}
+
+int kit_atomic_compare_exchange_explicit_16(u16 volatile *var,
+ u16 *expected, u16 value,
+ i32 memory_order_succ_,
+ i32 memory_order_fail_) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+ short exp = (short) *expected;
+
+ *expected = (u16) _InterlockedCompareExchange16(dst, src, exp);
+
+ return exp == (short) *expected;
+}
+
+int kit_atomic_compare_exchange_explicit_32(u32 volatile *var,
+ u32 *expected, u32 value,
+ i32 memory_order_succ_,
+ i32 memory_order_fail_) {
+  long volatile *dst = (long volatile *) var;
+  long           src = (long) value;
+  long           exp = (long) *expected;
+
+ *expected = (u32) _InterlockedCompareExchange(dst, src, exp);
+
+  return exp == (long) *expected;
+}
+
+int kit_atomic_compare_exchange_explicit_64(u64 volatile *var,
+ u64 *expected, u64 value,
+ i32 memory_order_succ_,
+ i32 memory_order_fail_) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+ __int64 exp = (__int64) *expected;
+
+# ifdef _WIN64
+ *expected = (u64) _InterlockedCompareExchange64(dst, src, exp);
+# else
+ assert(0);
+  *expected = (u64) _InterlockedCompareExchange((long volatile *) dst,
+                                                (long) src, (long) exp);
+# endif
+
+ return exp == (__int64) *expected;
+}
+
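+/*
+ *  Fetch-add: _InterlockedExchangeAdd* returns the value held prior
+ *  to the addition, matching C11 atomic_fetch_add_explicit; ordering
+ *  is again a full barrier regardless of memory_order.
+ */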
+u8 kit_atomic_fetch_add_explicit_8(u8 volatile *var, u8 value,
+ i32 memory_order) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+
+ return (u8) _InterlockedExchangeAdd8(dst, src);
+}
+
+u16 kit_atomic_fetch_add_explicit_16(u16 volatile *var, u16 value,
+ i32 memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ return (u16) _InterlockedExchangeAdd16(dst, src);
+}
+
+u32 kit_atomic_fetch_add_explicit_32(u32 volatile *var, u32 value,
+ i32 memory_order) {
+  long volatile *dst = (long volatile *) var;
+  long           src = (long) value;
+
+ return (u32) _InterlockedExchangeAdd(dst, src);
+}
+
+u64 kit_atomic_fetch_add_explicit_64(u64 volatile *var, u64 value,
+ i32 memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+# ifdef _WIN64
+ return (u64) _InterlockedExchangeAdd64(dst, src);
+# else
+ assert(0);
+  return (u64) _InterlockedExchangeAdd((long volatile *) dst,
+                                       (long) src);
+# endif
+}
+
+#endif