-rw-r--r--  source/kit/atomic.h        69
-rw-r--r--  source/kit/atomic.win32.c  191
2 files changed, 201 insertions, 59 deletions
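
The change replaces the old pointer-sized MSVC atomic shim (KIT_ATOMIC_VAR with the InterlockedExchange_/InterlockedExchangeAdd_ aliases) with explicit 8-, 16-, 32- and 64-bit store, load, exchange and fetch-add implementations built directly on the _Interlocked* intrinsics, and moves the volatile/_Atomic qualifiers to postfix position throughout.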
diff --git a/source/kit/atomic.h b/source/kit/atomic.h
index 5763808..b0235ad 100644
--- a/source/kit/atomic.h
+++ b/source/kit/atomic.h
@@ -9,8 +9,10 @@ extern "C" {
#ifndef _MSC_VER
# include <stdatomic.h>
-# define KIT_ATOMIC(type_) _Atomic type_
+# define KIT_ATOMIC(type_) type_ _Atomic
#else
+# define KIT_ATOMIC(type_) type_ volatile
+
enum {
memory_order_relaxed,
memory_order_consume,
@@ -19,61 +21,60 @@ enum {
memory_order_acq_rel,
memory_order_seq_cst
};
-# define KIT_ATOMIC(type_) volatile type_
-void kit_atomic_store_explicit_8(volatile uint8_t *var, uint8_t value,
+void kit_atomic_store_explicit_8(uint8_t volatile *var, uint8_t value,
int memory_order);
-void kit_atomic_store_explicit_16(volatile uint16_t *var,
+void kit_atomic_store_explicit_16(uint16_t volatile *var,
uint16_t value, int memory_order);
-void kit_atomic_store_explicit_32(volatile uint32_t *var,
+void kit_atomic_store_explicit_32(uint32_t volatile *var,
uint32_t value, int memory_order);
-void kit_atomic_store_explicit_64(volatile uint64_t *var,
+void kit_atomic_store_explicit_64(uint64_t volatile *var,
uint64_t value, int memory_order);
-uint8_t kit_atomic_load_explicit_8(volatile uint8_t *var,
+uint8_t kit_atomic_load_explicit_8(uint8_t volatile *var,
int memory_order);
-uint16_t kit_atomic_load_explicit_16(volatile uint16_t *var,
+uint16_t kit_atomic_load_explicit_16(uint16_t volatile *var,
int memory_order);
-uint32_t kit_atomic_load_explicit_32(volatile uint32_t *var,
+uint32_t kit_atomic_load_explicit_32(uint32_t volatile *var,
int memory_order);
-uint64_t kit_atomic_load_explicit_64(volatile uint64_t *var,
+uint64_t kit_atomic_load_explicit_64(uint64_t volatile *var,
int memory_order);
-uint8_t kit_atomic_exchange_explicit_8(volatile uint8_t *var,
+uint8_t kit_atomic_exchange_explicit_8(uint8_t volatile *var,
uint8_t value,
int memory_order);
-uint16_t kit_atomic_exchange_explicit_16(volatile uint16_t *var,
+uint16_t kit_atomic_exchange_explicit_16(uint16_t volatile *var,
uint16_t value,
int memory_order);
-uint32_t kit_atomic_exchange_explicit_32(volatile uint32_t *var,
+uint32_t kit_atomic_exchange_explicit_32(uint32_t volatile *var,
uint32_t value,
int memory_order);
-uint64_t kit_atomic_exchange_explicit_64(volatile uint64_t *var,
+uint64_t kit_atomic_exchange_explicit_64(uint64_t volatile *var,
uint64_t value,
int memory_order);
-uint8_t kit_atomic_fetch_add_explicit_8(volatile uint8_t *var,
+uint8_t kit_atomic_fetch_add_explicit_8(uint8_t volatile *var,
uint8_t value,
int memory_order);
-uint16_t kit_atomic_fetch_add_explicit_16(volatile uint16_t *var,
+uint16_t kit_atomic_fetch_add_explicit_16(uint16_t volatile *var,
uint16_t value,
int memory_order);
-uint32_t kit_atomic_fetch_add_explicit_32(volatile uint32_t *var,
+uint32_t kit_atomic_fetch_add_explicit_32(uint32_t volatile *var,
uint32_t value,
int memory_order);
-uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
+uint64_t kit_atomic_fetch_add_explicit_64(uint64_t volatile *var,
uint64_t value,
int memory_order);
@@ -85,19 +86,19 @@ uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
static_assert(sizeof(value_) <= sizeof *(var_), \
"Wrong value type"); \
if (sizeof *(var_) == 1) \
- kit_atomic_store_explicit_8((volatile uint8_t *) (var_), \
+ kit_atomic_store_explicit_8((uint8_t volatile *) (var_), \
(uint8_t) (value_), \
(memory_order_)); \
if (sizeof *(var_) == 2) \
- kit_atomic_store_explicit_16((volatile uint16_t *) (var_), \
+ kit_atomic_store_explicit_16((uint16_t volatile *) (var_), \
(uint16_t) (value_), \
(memory_order_)); \
if (sizeof *(var_) == 4) \
- kit_atomic_store_explicit_32((volatile uint32_t *) (var_), \
+ kit_atomic_store_explicit_32((uint32_t volatile *) (var_), \
(uint32_t) (value_), \
(memory_order_)); \
if (sizeof *(var_) == 8) \
- kit_atomic_store_explicit_64((volatile uint64_t *) (var_), \
+ kit_atomic_store_explicit_64((uint64_t volatile *) (var_), \
(uint64_t) (value_), \
(memory_order_)); \
} while (0)
@@ -107,16 +108,16 @@ uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
sizeof *(var_) == 4 || sizeof *(var_) == 8, \
"Wrong atomic variable type"), \
(sizeof *(var_) == 1 \
- ? kit_atomic_load_explicit_8((volatile uint8_t *) (var_), \
+ ? kit_atomic_load_explicit_8((uint8_t volatile *) (var_), \
(memory_order_)) \
: sizeof *(var_) == 2 ? kit_atomic_load_explicit_16( \
- (volatile uint16_t *) (var_), \
+ (uint16_t volatile *) (var_), \
(memory_order_)) \
: sizeof *(var_) == 4 ? kit_atomic_load_explicit_32( \
- (volatile uint32_t *) (var_), \
+ (uint32_t volatile *) (var_), \
(memory_order_)) \
: kit_atomic_load_explicit_64( \
- (volatile uint64_t *) (var_), \
+ (uint64_t volatile *) (var_), \
(memory_order_))))
# define atomic_exchange_explicit(var_, value_, memory_order_) \
@@ -126,18 +127,18 @@ uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
static_assert(sizeof(value_) <= sizeof *(var_), \
"Wrong value type"), \
(sizeof *(var_) == 1 ? kit_atomic_exchange_explicit_8( \
- (volatile uint8_t *) (var_), \
+ (uint8_t volatile *) (var_), \
(uint8_t) (value_), (memory_order_)) \
: sizeof *(var_) == 2 \
? kit_atomic_exchange_explicit_16( \
- (volatile uint16_t *) (var_), (uint16_t) (value_), \
+ (uint16_t volatile *) (var_), (uint16_t) (value_), \
(memory_order_)) \
: sizeof *(var_) == 4 \
? kit_atomic_exchange_explicit_32( \
- (volatile uint32_t *) (var_), (uint32_t) (value_), \
+ (uint32_t volatile *) (var_), (uint32_t) (value_), \
(memory_order_)) \
: kit_atomic_exchange_explicit_64( \
- (volatile uint64_t *) (var_), (uint64_t) (value_), \
+ (uint64_t volatile *) (var_), (uint64_t) (value_), \
(memory_order_))))
# define atomic_fetch_add_explicit(var_, value_, memory_order_) \
@@ -147,18 +148,18 @@ uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
static_assert(sizeof(value_) <= sizeof *(var_), \
"Wrong value type"), \
(sizeof *(var_) == 1 ? kit_atomic_fetch_add_explicit_8( \
- (volatile uint8_t *) (var_), \
+ (uint8_t volatile *) (var_), \
(uint8_t) (value_), (memory_order_)) \
: sizeof *(var_) == 2 \
? kit_atomic_fetch_add_explicit_16( \
- (volatile uint16_t *) (var_), (uint16_t) (value_), \
+ (uint16_t volatile *) (var_), (uint16_t) (value_), \
(memory_order_)) \
: sizeof *(var_) == 4 \
? kit_atomic_fetch_add_explicit_32( \
- (volatile uint32_t *) (var_), (uint32_t) (value_), \
+ (uint32_t volatile *) (var_), (uint32_t) (value_), \
(memory_order_)) \
: kit_atomic_fetch_add_explicit_64( \
- (volatile uint64_t *) (var_), (uint64_t) (value_), \
+ (uint64_t volatile *) (var_), (uint64_t) (value_), \
(memory_order_))))
#endif
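
For reference, a minimal usage sketch of the shimmed API above (hypothetical names such as g_counter; on MSVC the macros dispatch by operand size to the kit_atomic_*_explicit_N functions, elsewhere they resolve to C11 <stdatomic.h>):

    #include "kit/atomic.h" /* include path assumed */
    #include <stdint.h>
    #include <stdio.h>

    static KIT_ATOMIC(uint32_t) g_counter;

    int main(void) {
      atomic_store_explicit(&g_counter, 0, memory_order_seq_cst);
      /* fetch_add returns the previous value, here 0. */
      uint32_t old = (uint32_t) atomic_fetch_add_explicit(
          &g_counter, 1, memory_order_seq_cst);
      printf("%u -> %u\n", (unsigned) old,
             (unsigned) atomic_load_explicit(&g_counter,
                                             memory_order_relaxed));
      return 0;
    }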
diff --git a/source/kit/atomic.win32.c b/source/kit/atomic.win32.c
index fc5b53d..9977955 100644
--- a/source/kit/atomic.win32.c
+++ b/source/kit/atomic.win32.c
@@ -1,44 +1,185 @@
#include "atomic.h"
#ifdef _MSC_VER
+static_assert(sizeof(char) == 1, "Wrong char size");
+static_assert(sizeof(short) == 2, "Wrong short size");
+static_assert(sizeof(long) == 4, "Wrong long size");
+
# include <intrin.h>
-# ifdef _WIN64
-# pragma intrinsic(_InterlockedExchange64)
-# pragma intrinsic(_InterlockedExchangeAdd64)
+void kit_atomic_store_explicit_8(uint8_t volatile *var, uint8_t value,
+ int memory_order) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange8(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_16(uint16_t volatile *var,
+ uint16_t value, int memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange16(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_32(uint32_t volatile *var,
+ uint32_t value, int memory_order) {
+ long volatile *dst = (long volatile *) var;
+ long src = (long) value;
-# define InterlockedExchange_ _InterlockedExchange64
-# define InterlockedExchangeAdd_ _InterlockedExchangeAdd64
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default: _InterlockedExchange(dst, src);
+ }
+}
+
+void kit_atomic_store_explicit_64(uint64_t volatile *var,
+ uint64_t value, int memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+ switch (memory_order) {
+ case memory_order_relaxed: *dst = src; break;
+ default:
+# ifdef _WIN64
+ _InterlockedExchange64(dst, src);
# else
-# pragma intrinsic(_InterlockedExchange32)
-# pragma intrinsic(_InterlockedExchangeAdd32)
+ /* 32-bit x86 lacks a plain 64-bit exchange intrinsic; emulate
+ the store with a compare-exchange loop. */
+ {
+ __int64 old = *dst;
+ while (_InterlockedCompareExchange64(dst, src, old) != old)
+ old = *dst;
+ }
+# endif
+ }
+}
+
+uint8_t kit_atomic_load_explicit_8(uint8_t volatile *var,
+ int memory_order) {
+ char volatile *dst = (char volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (uint8_t) *dst;
+
+ return (uint8_t) _InterlockedOr8(dst, 0);
+}
+
+uint16_t kit_atomic_load_explicit_16(uint16_t volatile *var,
+ int memory_order) {
+ short volatile *dst = (short volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (uint16_t) *dst;
+
+ return (uint16_t) _InterlockedOr16(dst, 0);
+}
+
+uint32_t kit_atomic_load_explicit_32(uint32_t volatile *var,
+ int memory_order) {
+ long volatile *dst = (long volatile *) var;
+
+ if (memory_order == memory_order_relaxed)
+ return (uint32_t) *dst;
+
+ return (uint32_t) _InterlockedOr(dst, 0);
+}
+
+uint64_t kit_atomic_load_explicit_64(uint64_t volatile *var,
+ int memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
-# define InterlockedExchange_ _InterlockedExchange32
-# define InterlockedExchangeAdd_ _InterlockedExchangeAdd32
+ if (memory_order == memory_order_relaxed)
+ return (uint64_t) *dst;
+
+# ifdef _WIN64
+ return (uint64_t) _InterlockedOr64(dst, 0);
+# else
+ /* Atomic 64-bit read on 32-bit x86 via lock cmpxchg8b. */
+ return (uint64_t) _InterlockedCompareExchange64(dst, 0, 0);
# endif
+}
+
+uint8_t kit_atomic_exchange_explicit_8(uint8_t volatile *var,
+ uint8_t value,
+ int memory_order) {
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
-void kit_atomic_store_explicit(volatile KIT_ATOMIC_VAR *var,
- KIT_ATOMIC_VAR value,
- int memory_order) {
- InterlockedExchange_(var, value);
+ return (uint8_t) _InterlockedExchange8(dst, src);
}
-KIT_ATOMIC_VAR kit_atomic_load_explicit(volatile KIT_ATOMIC_VAR *var,
+uint16_t kit_atomic_exchange_explicit_16(uint16_t volatile *var,
+ uint16_t value,
+ int memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ return (uint16_t) _InterlockedExchange16(dst, src);
+}
+
+uint32_t kit_atomic_exchange_explicit_32(uint32_t volatile *var,
+ uint32_t value,
+ int memory_order) {
+ long volatile *dst = (long volatile *) var;
+ long src = (long) value;
+
+ return (uint32_t) _InterlockedExchange(dst, src);
+}
+
+uint64_t kit_atomic_exchange_explicit_64(uint64_t volatile *var,
+ uint64_t value,
+ int memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+# ifdef _WIN64
+ return (uint64_t) _InterlockedExchange64(dst, src);
+# else
+ /* Emulate a 64-bit exchange with a compare-exchange loop. */
+ __int64 old = *dst;
+ while (_InterlockedCompareExchange64(dst, src, old) != old)
+ old = *dst;
+ return (uint64_t) old;
+# endif
+}
+
+uint8_t kit_atomic_fetch_add_explicit_8(uint8_t volatile *var,
+ uint8_t value,
int memory_order) {
- if (memory_order == memory_order_relaxed)
- return *var;
- return InterlockedExchangeAdd_(var, 0);
+ char volatile *dst = (char volatile *) var;
+ char src = (char) value;
+
+ return (uint8_t) _InterlockedExchangeAdd8(dst, src);
+}
+
+uint16_t kit_atomic_fetch_add_explicit_16(uint16_t volatile *var,
+ uint16_t value,
+ int memory_order) {
+ short volatile *dst = (short volatile *) var;
+ short src = (short) value;
+
+ return (uint16_t) _InterlockedExchangeAdd16(dst, src);
}
-KIT_ATOMIC_VAR kit_atomic_fetch_add_explicit(
- volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value,
- int memory_order) {
- return InterlockedExchangeAdd_(var, value);
+uint32_t kit_atomic_fetch_add_explicit_32(uint32_t volatile *var,
+ uint32_t value,
+ int memory_order) {
+ long volatile *dst = (long volatile *) var;
+ long src = (long) value;
+
+ return (uint32_t) _InterlockedExchangeAdd(dst, src);
}
-KIT_ATOMIC_VAR kit_atomic_exchange_explicit(
- volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value,
- int memory_order) {
- return InterlockedExchange_(var, value);
+uint64_t kit_atomic_fetch_add_explicit_64(uint64_t volatile *var,
+ uint64_t value,
+ int memory_order) {
+ __int64 volatile *dst = (__int64 volatile *) var;
+ __int64 src = (__int64) value;
+
+# ifdef _WIN64
+ return (uint64_t) _InterlockedExchangeAdd64(dst, src);
+# else
+ /* Emulate a 64-bit fetch-add with a compare-exchange loop. */
+ __int64 old = *dst;
+ while (_InterlockedCompareExchange64(dst, old + src, old) != old)
+ old = *dst;
+ return (uint64_t) old;
+# endif
}
+
#endif
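
A quick smoke test of the new per-width fallbacks might look like the sketch below (not part of the commit); the 64-bit cases are the interesting ones, since they take the compare-exchange fallback on 32-bit x86 builds:

    #include "kit/atomic.h"
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      KIT_ATOMIC(uint64_t) v;
      atomic_store_explicit(&v, 41, memory_order_seq_cst);
      assert(atomic_load_explicit(&v, memory_order_seq_cst) == 41);
      /* fetch_add returns the old value and bumps v to 42. */
      assert(atomic_fetch_add_explicit(&v, 1, memory_order_seq_cst) == 41);
      /* exchange returns the replaced value. */
      assert(atomic_exchange_explicit(&v, 7, memory_order_seq_cst) == 42);
      assert(atomic_load_explicit(&v, memory_order_relaxed) == 7);
      return 0;
    }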