From b53edfe94790a875ab873b880516326b88a3a751 Mon Sep 17 00:00:00 2001 From: Mitya Selivanov <0x7fffff@guattari.ru> Date: Wed, 31 Aug 2022 02:18:29 +0400 Subject: [atomic] win32 test --- source/kit/CMakeLists.txt | 10 +- source/kit/atomic.c | 44 ------- source/kit/atomic.h | 160 ++++++++++++++++++++---- source/kit/atomic.win32.c | 44 +++++++ source/kit/condition_variable.c | 1 + source/kit/condition_variable.h | 6 + source/kit/mutex.c | 1 + source/kit/mutex.h | 6 + source/test/unittests/atomic.test.c | 159 +++++++++++++++++++++++ source/test/unittests/condition_variable.test.c | 2 +- source/test/unittests/mutex.test.c | 34 ++--- 11 files changed, 378 insertions(+), 89 deletions(-) delete mode 100644 source/kit/atomic.c create mode 100644 source/kit/atomic.win32.c create mode 100644 source/kit/condition_variable.c create mode 100644 source/kit/condition_variable.h create mode 100644 source/kit/mutex.c create mode 100644 source/kit/mutex.h (limited to 'source') diff --git a/source/kit/CMakeLists.txt b/source/kit/CMakeLists.txt index a8aaf02..7bc3ba5 100644 --- a/source/kit/CMakeLists.txt +++ b/source/kit/CMakeLists.txt @@ -1,17 +1,19 @@ target_sources( ${KIT_LIBRARY} PRIVATE - atomic.c input_buffer.c threads.win32.c time.c - threads.posix.c move_back.c input_stream.c lower_bound.c string_ref.c - async_function.c allocator.c array_ref.c dynamic_array.c - mersenne_twister_64.c + input_buffer.c threads.win32.c time.c atomic.win32.c + threads.posix.c condition_variable.c move_back.c input_stream.c + lower_bound.c string_ref.c async_function.c allocator.c array_ref.c + dynamic_array.c mutex.c mersenne_twister_64.c PUBLIC + $ $ $ $ $ $ $ + $ $ $ $ diff --git a/source/kit/atomic.c b/source/kit/atomic.c deleted file mode 100644 index fc5b53d..0000000 --- a/source/kit/atomic.c +++ /dev/null @@ -1,44 +0,0 @@ -#include "atomic.h" - -#ifdef _MSC_VER -# include - -# ifdef _WIN64 -# pragma intrinsic(_InterlockedExchange64) -# pragma intrinsic(_InterlockedExchangeAdd64) - -# define InterlockedExchange_ _InterlockedExchange64 -# define InterlockedExchangeAdd_ _InterlockedExchangeAdd64 -# else -# pragma intrinsic(_InterlockedExchange32) -# pragma intrinsic(_InterlockedExchangeAdd32) - -# define InterlockedExchange_ _InterlockedExchange32 -# define InterlockedExchangeAdd_ _InterlockedExchangeAdd32 -# endif - -void kit_atomic_store_explicit(volatile KIT_ATOMIC_VAR *var, - KIT_ATOMIC_VAR value, - int memory_order) { - InterlockedExchange_(var, value); -} - -KIT_ATOMIC_VAR kit_atomic_load_explicit(volatile KIT_ATOMIC_VAR *var, - int memory_order) { - if (memory_order == memory_order_relaxed) - return *var; - return InterlockedExchangeAdd_(var, 0); -} - -KIT_ATOMIC_VAR kit_atomic_fetch_add_explicit( - volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value, - int memory_order) { - return InterlockedExchangeAdd_(var, value); -} - -KIT_ATOMIC_VAR kit_atomic_exchange_explicit( - volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value, - int memory_order) { - return InterlockedExchange_(var, value); -} -#endif diff --git a/source/kit/atomic.h b/source/kit/atomic.h index 4b3e720..5763808 100644 --- a/source/kit/atomic.h +++ b/source/kit/atomic.h @@ -9,7 +9,6 @@ extern "C" { #ifndef _MSC_VER # include - # define KIT_ATOMIC(type_) _Atomic type_ #else enum { @@ -20,32 +19,147 @@ enum { memory_order_acq_rel, memory_order_seq_cst }; -# ifdef _WIN64 -# define KIT_ATOMIC_VAR int64_t -# else -# define KIT_ATOMIC_VAR int32_t -# endif -# define KIT_ATOMIC(type_) volatile KIT_ATOMIC_VAR - -void kit_atomic_store_explicit(volatile 
KIT_ATOMIC_VAR *var,
-                               KIT_ATOMIC_VAR value,
-                               int memory_order);
-
-KIT_ATOMIC_VAR kit_atomic_load_explicit(volatile KIT_ATOMIC_VAR *var,
+# define KIT_ATOMIC(type_) volatile type_
+
+void kit_atomic_store_explicit_8(volatile uint8_t *var, uint8_t value,
+                                 int memory_order);
+
+void kit_atomic_store_explicit_16(volatile uint16_t *var,
+                                  uint16_t value, int memory_order);
+
+void kit_atomic_store_explicit_32(volatile uint32_t *var,
+                                  uint32_t value, int memory_order);
+
+void kit_atomic_store_explicit_64(volatile uint64_t *var,
+                                  uint64_t value, int memory_order);
+
+uint8_t kit_atomic_load_explicit_8(volatile uint8_t *var,
+                                   int memory_order);
+
+uint16_t kit_atomic_load_explicit_16(volatile uint16_t *var,
+                                     int memory_order);
+
+uint32_t kit_atomic_load_explicit_32(volatile uint32_t *var,
+                                     int memory_order);
+
+uint64_t kit_atomic_load_explicit_64(volatile uint64_t *var,
+                                     int memory_order);
+
+uint8_t kit_atomic_exchange_explicit_8(volatile uint8_t *var,
+                                       uint8_t value,
+                                       int memory_order);
+
+uint16_t kit_atomic_exchange_explicit_16(volatile uint16_t *var,
+                                         uint16_t value,
+                                         int memory_order);
+
+uint32_t kit_atomic_exchange_explicit_32(volatile uint32_t *var,
+                                         uint32_t value,
+                                         int memory_order);
+
+uint64_t kit_atomic_exchange_explicit_64(volatile uint64_t *var,
+                                         uint64_t value,
+                                         int memory_order);
+
+uint8_t kit_atomic_fetch_add_explicit_8(volatile uint8_t *var,
+                                        uint8_t value,
                                         int memory_order);

-KIT_ATOMIC_VAR kit_atomic_fetch_add_explicit(
-    volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value,
-    int memory_order);
+uint16_t kit_atomic_fetch_add_explicit_16(volatile uint16_t *var,
+                                          uint16_t value,
+                                          int memory_order);
+
+uint32_t kit_atomic_fetch_add_explicit_32(volatile uint32_t *var,
+                                          uint32_t value,
+                                          int memory_order);
+
+uint64_t kit_atomic_fetch_add_explicit_64(volatile uint64_t *var,
+                                          uint64_t value,
+                                          int memory_order);
+
+# define atomic_store_explicit(var_, value_, memory_order_)        \
+  do {                                                             \
+    static_assert(sizeof *(var_) == 1 || sizeof *(var_) == 2 ||    \
+                      sizeof *(var_) == 4 || sizeof *(var_) == 8,  \
+                  "Wrong atomic variable type");                   \
+    static_assert(sizeof(value_) <= sizeof *(var_),                \
+                  "Wrong value type");                             \
+    if (sizeof *(var_) == 1)                                       \
+      kit_atomic_store_explicit_8((volatile uint8_t *) (var_),     \
+                                  (uint8_t) (value_),              \
+                                  (memory_order_));                \
+    if (sizeof *(var_) == 2)                                       \
+      kit_atomic_store_explicit_16((volatile uint16_t *) (var_),   \
+                                   (uint16_t) (value_),            \
+                                   (memory_order_));               \
+    if (sizeof *(var_) == 4)                                       \
+      kit_atomic_store_explicit_32((volatile uint32_t *) (var_),   \
+                                   (uint32_t) (value_),            \
+                                   (memory_order_));               \
+    if (sizeof *(var_) == 8)                                       \
+      kit_atomic_store_explicit_64((volatile uint64_t *) (var_),   \
+                                   (uint64_t) (value_),            \
+                                   (memory_order_));               \
+  } while (0)
+
+# define atomic_load_explicit(var_, memory_order_)                 \
+  (static_assert(sizeof *(var_) == 1 || sizeof *(var_) == 2 ||     \
+                     sizeof *(var_) == 4 || sizeof *(var_) == 8,   \
+                 "Wrong atomic variable type"),                    \
+   (sizeof *(var_) == 1                                            \
+        ? kit_atomic_load_explicit_8((volatile uint8_t *) (var_),  \
+                                     (memory_order_))              \
+    : sizeof *(var_) == 2 ? kit_atomic_load_explicit_16(           \
+                                (volatile uint16_t *) (var_),      \
+                                (memory_order_))                   \
+    : sizeof *(var_) == 4 ? kit_atomic_load_explicit_32(           \
+                                (volatile uint32_t *) (var_),      \
+                                (memory_order_))                   \
+                          : kit_atomic_load_explicit_64(           \
+                                (volatile uint64_t *) (var_),      \
+                                (memory_order_))))

-KIT_ATOMIC_VAR kit_atomic_exchange_explicit(
-    volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value,
-    int memory_order);
+# define atomic_exchange_explicit(var_, value_, memory_order_)     \
+  (static_assert(sizeof *(var_) == 1 || sizeof *(var_) == 2 ||     \
+                     sizeof *(var_) == 4 || sizeof *(var_) == 8,   \
+                 "Wrong atomic variable type"),                    \
+   static_assert(sizeof(value_) <= sizeof *(var_),                 \
+                 "Wrong value type"),                              \
+   (sizeof *(var_) == 1 ? kit_atomic_exchange_explicit_8(          \
+                              (volatile uint8_t *) (var_),         \
+                              (uint8_t) (value_), (memory_order_)) \
+    : sizeof *(var_) == 2                                          \
+        ? kit_atomic_exchange_explicit_16(                         \
+              (volatile uint16_t *) (var_), (uint16_t) (value_),   \
+              (memory_order_))                                     \
+    : sizeof *(var_) == 4                                          \
+        ? kit_atomic_exchange_explicit_32(                         \
+              (volatile uint32_t *) (var_), (uint32_t) (value_),   \
+              (memory_order_))                                     \
+        : kit_atomic_exchange_explicit_64(                         \
+              (volatile uint64_t *) (var_), (uint64_t) (value_),   \
+              (memory_order_))))

-# define atomic_store_explicit kit_atomic_store_explicit
-# define atomic_load_explicit kit_atomic_load_explicit
-# define atomic_fetch_add_explicit kit_atomic_fetch_add_explicit
-# define atomic_exchange_explicit kit_atomic_exchange_explicit
+# define atomic_fetch_add_explicit(var_, value_, memory_order_)    \
+  (static_assert(sizeof *(var_) == 1 || sizeof *(var_) == 2 ||     \
+                     sizeof *(var_) == 4 || sizeof *(var_) == 8,   \
+                 "Wrong atomic variable type"),                    \
+   static_assert(sizeof(value_) <= sizeof *(var_),                 \
+                 "Wrong value type"),                              \
+   (sizeof *(var_) == 1 ? kit_atomic_fetch_add_explicit_8(         \
+                              (volatile uint8_t *) (var_),         \
+                              (uint8_t) (value_), (memory_order_)) \
+    : sizeof *(var_) == 2                                          \
+        ? kit_atomic_fetch_add_explicit_16(                        \
+              (volatile uint16_t *) (var_), (uint16_t) (value_),   \
+              (memory_order_))                                     \
+    : sizeof *(var_) == 4                                          \
+        ?
kit_atomic_fetch_add_explicit_32( \ + (volatile uint32_t *) (var_), (uint32_t) (value_), \ + (memory_order_)) \ + : kit_atomic_fetch_add_explicit_64( \ + (volatile uint64_t *) (var_), (uint64_t) (value_), \ + (memory_order_)))) #endif #ifndef KIT_DISABLE_SHORT_NAMES diff --git a/source/kit/atomic.win32.c b/source/kit/atomic.win32.c new file mode 100644 index 0000000..fc5b53d --- /dev/null +++ b/source/kit/atomic.win32.c @@ -0,0 +1,44 @@ +#include "atomic.h" + +#ifdef _MSC_VER +# include + +# ifdef _WIN64 +# pragma intrinsic(_InterlockedExchange64) +# pragma intrinsic(_InterlockedExchangeAdd64) + +# define InterlockedExchange_ _InterlockedExchange64 +# define InterlockedExchangeAdd_ _InterlockedExchangeAdd64 +# else +# pragma intrinsic(_InterlockedExchange32) +# pragma intrinsic(_InterlockedExchangeAdd32) + +# define InterlockedExchange_ _InterlockedExchange32 +# define InterlockedExchangeAdd_ _InterlockedExchangeAdd32 +# endif + +void kit_atomic_store_explicit(volatile KIT_ATOMIC_VAR *var, + KIT_ATOMIC_VAR value, + int memory_order) { + InterlockedExchange_(var, value); +} + +KIT_ATOMIC_VAR kit_atomic_load_explicit(volatile KIT_ATOMIC_VAR *var, + int memory_order) { + if (memory_order == memory_order_relaxed) + return *var; + return InterlockedExchangeAdd_(var, 0); +} + +KIT_ATOMIC_VAR kit_atomic_fetch_add_explicit( + volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value, + int memory_order) { + return InterlockedExchangeAdd_(var, value); +} + +KIT_ATOMIC_VAR kit_atomic_exchange_explicit( + volatile KIT_ATOMIC_VAR *var, KIT_ATOMIC_VAR value, + int memory_order) { + return InterlockedExchange_(var, value); +} +#endif diff --git a/source/kit/condition_variable.c b/source/kit/condition_variable.c new file mode 100644 index 0000000..b18e402 --- /dev/null +++ b/source/kit/condition_variable.c @@ -0,0 +1 @@ +#include "condition_variable.h" diff --git a/source/kit/condition_variable.h b/source/kit/condition_variable.h new file mode 100644 index 0000000..dd3b891 --- /dev/null +++ b/source/kit/condition_variable.h @@ -0,0 +1,6 @@ +#ifndef KIT_CONDITION_VARIABLE_H +#define KIT_CONDITION_VARIABLE_H + +#include "threads.h" + +#endif diff --git a/source/kit/mutex.c b/source/kit/mutex.c new file mode 100644 index 0000000..320d4c7 --- /dev/null +++ b/source/kit/mutex.c @@ -0,0 +1 @@ +#include "mutex.h" diff --git a/source/kit/mutex.h b/source/kit/mutex.h new file mode 100644 index 0000000..0b7c548 --- /dev/null +++ b/source/kit/mutex.h @@ -0,0 +1,6 @@ +#ifndef KIT_MUTEX_H +#define KIT_MUTEX_H + +#include "threads.h" + +#endif diff --git a/source/test/unittests/atomic.test.c b/source/test/unittests/atomic.test.c index 9f1a10b..8f6d22b 100644 --- a/source/test/unittests/atomic.test.c +++ b/source/test/unittests/atomic.test.c @@ -1,4 +1,5 @@ #include "../../kit/atomic.h" +#include "../../kit/threads.h" #define KIT_TEST_FILE atomic #include "../../kit_test/test.h" @@ -24,3 +25,161 @@ TEST("atomic fetch add") { memory_order_relaxed) == 20); REQUIRE(atomic_load_explicit(&value, memory_order_relaxed) == 42); } + +enum { THREAD_COUNT = 20, TICK_COUNT = 10000 }; + +static int test_8_(void *p) { + ATOMIC(int8_t) *x = (ATOMIC(int8_t) *) p; + + for (ptrdiff_t i = 0; i < TICK_COUNT; i++) { + atomic_fetch_add_explicit(x, 20, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, 22, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, -42, memory_order_relaxed); + thrd_yield(); + } +} + +TEST("atomic types") { + ATOMIC(int8_t) byte; + ATOMIC(int16_t) i16; + ATOMIC(int32_t) i32; + 
ATOMIC(int64_t) i64; + + atomic_store_explicit(&byte, 42, memory_order_relaxed); + atomic_store_explicit(&i16, 4242, memory_order_relaxed); + atomic_store_explicit(&i32, 42424242, memory_order_relaxed); + atomic_store_explicit(&i64, 4242424242424242ll, + memory_order_relaxed); + + atomic_fetch_add_explicit(&byte, -20, memory_order_relaxed); + atomic_fetch_add_explicit(&i16, -2020, memory_order_relaxed); + atomic_fetch_add_explicit(&i32, -20202020, memory_order_relaxed); + atomic_fetch_add_explicit(&i64, -2020202020202020ll, + memory_order_relaxed); + + REQUIRE(atomic_exchange_explicit(&byte, 0, memory_order_relaxed) == + 22); + REQUIRE(atomic_exchange_explicit(&i16, 0, memory_order_relaxed) == + 2222); + REQUIRE(atomic_exchange_explicit(&i32, 0, memory_order_relaxed) == + 22222222); + REQUIRE(atomic_exchange_explicit(&i64, 0, memory_order_relaxed) == + 2222222222222222ll); + + REQUIRE(atomic_load_explicit(&byte, memory_order_relaxed) == 0); + REQUIRE(atomic_load_explicit(&i16, memory_order_relaxed) == 0); + REQUIRE(atomic_load_explicit(&i32, memory_order_relaxed) == 0); + REQUIRE(atomic_load_explicit(&i64, memory_order_relaxed) == 0ll); +} + +TEST("atomic byte concurrency") { + ATOMIC(int8_t) foo; + ATOMIC(int8_t) bar; + + atomic_store_explicit(&foo, 42, memory_order_relaxed); + atomic_store_explicit(&bar, 43, memory_order_relaxed); + + thrd_t threads[THREAD_COUNT]; + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_create(threads + i, test_8_, (i % 2) ? &foo : &bar); + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_join(threads[i], NULL); + + REQUIRE(atomic_load_explicit(&foo, memory_order_relaxed) == 42); + REQUIRE(atomic_load_explicit(&bar, memory_order_relaxed) == 43); +} + +static int test_16_(void *p) { + ATOMIC(int16_t) *x = (ATOMIC(int16_t) *) p; + + for (ptrdiff_t i = 0; i < TICK_COUNT; i++) { + atomic_fetch_add_explicit(x, 2020, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, 2222, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, -4242, memory_order_relaxed); + thrd_yield(); + } +} + +TEST("atomic int16 concurrency") { + ATOMIC(int16_t) foo; + ATOMIC(int16_t) bar; + + atomic_store_explicit(&foo, 42, memory_order_relaxed); + atomic_store_explicit(&bar, 43, memory_order_relaxed); + + thrd_t threads[THREAD_COUNT]; + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_create(threads + i, test_16_, (i % 2) ? &foo : &bar); + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_join(threads[i], NULL); + + REQUIRE(atomic_load_explicit(&foo, memory_order_relaxed) == 42); + REQUIRE(atomic_load_explicit(&bar, memory_order_relaxed) == 43); +} + +static int test_32_(void *p) { + ATOMIC(int32_t) *x = (ATOMIC(int32_t) *) p; + + for (ptrdiff_t i = 0; i < TICK_COUNT; i++) { + atomic_fetch_add_explicit(x, 202020, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, 222222, memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, -424242, memory_order_relaxed); + thrd_yield(); + } +} + +TEST("atomic int32 concurrency") { + ATOMIC(int32_t) foo; + ATOMIC(int32_t) bar; + + atomic_store_explicit(&foo, 42, memory_order_relaxed); + atomic_store_explicit(&bar, 43, memory_order_relaxed); + + thrd_t threads[THREAD_COUNT]; + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_create(threads + i, test_32_, (i % 2) ? 
&foo : &bar); + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_join(threads[i], NULL); + + REQUIRE(atomic_load_explicit(&foo, memory_order_relaxed) == 42); + REQUIRE(atomic_load_explicit(&bar, memory_order_relaxed) == 43); +} + +static int test_64_(void *p) { + ATOMIC(int64_t) *x = (ATOMIC(int64_t) *) p; + + for (ptrdiff_t i = 0; i < TICK_COUNT; i++) { + atomic_fetch_add_explicit(x, 20202020202020ll, + memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, 22222222222222ll, + memory_order_relaxed); + thrd_yield(); + atomic_fetch_add_explicit(x, -42424242424242ll, + memory_order_relaxed); + thrd_yield(); + } +} + +TEST("atomic int64 concurrency") { + ATOMIC(int64_t) foo; + ATOMIC(int64_t) bar; + + atomic_store_explicit(&foo, 42, memory_order_relaxed); + atomic_store_explicit(&bar, 43, memory_order_relaxed); + + thrd_t threads[THREAD_COUNT]; + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_create(threads + i, test_64_, (i % 2) ? &foo : &bar); + for (ptrdiff_t i = 0; i < THREAD_COUNT; i++) + thrd_join(threads[i], NULL); + + REQUIRE(atomic_load_explicit(&foo, memory_order_relaxed) == 42); + REQUIRE(atomic_load_explicit(&bar, memory_order_relaxed) == 43); +} diff --git a/source/test/unittests/condition_variable.test.c b/source/test/unittests/condition_variable.test.c index 93389e9..c855b0c 100644 --- a/source/test/unittests/condition_variable.test.c +++ b/source/test/unittests/condition_variable.test.c @@ -1,4 +1,4 @@ -#include "../../kit/threads.h" +#include "../../kit/condition_variable.h" #define KIT_TEST_FILE condition_variable #include "../../kit_test/test.h" diff --git a/source/test/unittests/mutex.test.c b/source/test/unittests/mutex.test.c index d7b209b..7cce6db 100644 --- a/source/test/unittests/mutex.test.c +++ b/source/test/unittests/mutex.test.c @@ -1,9 +1,9 @@ -#include "../../kit/threads.h" +#include "../../kit/mutex.h" #define KIT_TEST_FILE mutex #include "../../kit_test/test.h" -enum { THREAD_COUNT = 200 }; +enum { SLEEP = 200000000, TICK_COUNT = 200, THREAD_COUNT = 100 }; typedef struct { mtx_t lock; @@ -12,7 +12,7 @@ typedef struct { static int test_run(void *data) { test_data_t *x = (test_data_t *) data; - for (int i = 0; i < 1000; i++) { + for (int i = 0; i < TICK_COUNT; i++) { mtx_lock(&x->lock); x->value += i; @@ -28,18 +28,6 @@ static int test_run(void *data) { return 0; } -int test_lock_for_2_sec(void *data) { - mtx_t *m = (mtx_t *) data; - mtx_lock(m); - - struct timespec sec = { .tv_sec = 2, .tv_nsec = 0 }; - thrd_sleep(&sec, NULL); - - mtx_unlock(m); - - return 0; -} - TEST("mutex lock") { test_data_t data; thrd_t pool[THREAD_COUNT]; @@ -55,14 +43,26 @@ TEST("mutex lock") { REQUIRE(data.value == 42); } +static int test_lock(void *data) { + mtx_t *m = (mtx_t *) data; + mtx_lock(m); + + struct timespec sec = { .tv_sec = 0, .tv_nsec = SLEEP }; + thrd_sleep(&sec, NULL); + + mtx_unlock(m); + + return 0; +} + TEST("mutex try lock") { mtx_t m; REQUIRE(mtx_init(&m, mtx_plain) == thrd_success); thrd_t t; - REQUIRE(thrd_create(&t, test_lock_for_2_sec, &m) == thrd_success); + REQUIRE(thrd_create(&t, test_lock, &m) == thrd_success); - struct timespec sec = { .tv_sec = 1, .tv_nsec = 0 }; + struct timespec sec = { .tv_sec = 0, .tv_nsec = SLEEP / 2 }; REQUIRE(thrd_sleep(&sec, NULL) == thrd_success); REQUIRE(mtx_trylock(&m) == thrd_busy); -- cgit v1.2.3
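
For quick reference, a minimal usage sketch of the API this patch exposes (not part of the patch itself; it assumes the header is reachable as "kit/atomic.h" and that KIT_DISABLE_SHORT_NAMES is not defined, so the ATOMIC() short name is available). On a C11 compiler the calls map straight to <stdatomic.h>; under MSVC they go through the size-dispatch macros added above:

    #include "kit/atomic.h" /* hypothetical include path */

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      ATOMIC(int32_t) counter;

      atomic_store_explicit(&counter, 40, memory_order_relaxed);
      atomic_fetch_add_explicit(&counter, 2, memory_order_relaxed);

      /* The stored value is now 42. */
      assert(atomic_load_explicit(&counter, memory_order_relaxed) == 42);

      /* exchange stores the new value and returns the previous one. */
      assert(atomic_exchange_explicit(&counter, 0, memory_order_relaxed) == 42);
      assert(atomic_load_explicit(&counter, memory_order_relaxed) == 0);
      return 0;
    }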
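On the MSVC path the generic names are macros that dispatch on the size of the referenced object, so a call from the tests such as atomic_fetch_add_explicit(&i16, -2020, memory_order_relaxed) conceptually reduces to the 2-byte branch; the other branches are dead because sizeof *(&i16) is known at compile time. Roughly:

    /* Conceptual expansion only, for a 2-byte atomic variable. */
    kit_atomic_fetch_add_explicit_16((volatile uint16_t *) &i16,
                                     (uint16_t) -2020,
                                     memory_order_relaxed);

The static_asserts in each macro reject objects whose size is not 1, 2, 4 or 8 bytes, as well as values wider than the atomic variable itself.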
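The header now declares per-width kit_atomic_*_explicit_8/16/32/64 entry points, which on MSVC would presumably be backed by the matching Interlocked intrinsics. As a rough sketch only (this is not the code shipped in atomic.win32.c in this commit), the 32-bit functions could look like the following; the 8-, 16- and 64-bit widths follow the same pattern with _InterlockedExchange8/16/64 and _InterlockedExchangeAdd8/16/64:

    /* Sketch of a possible MSVC backend for the 32-bit declarations. */
    #include "atomic.h"

    #ifdef _MSC_VER
    #  include <intrin.h>

    void kit_atomic_store_explicit_32(volatile uint32_t *var,
                                      uint32_t value, int memory_order) {
      (void) memory_order; /* Interlocked ops imply a full barrier. */
      _InterlockedExchange((volatile long *) var, (long) value);
    }

    uint32_t kit_atomic_load_explicit_32(volatile uint32_t *var,
                                         int memory_order) {
      if (memory_order == memory_order_relaxed)
        return *var;
      /* Adding zero returns the current value with a full barrier. */
      return (uint32_t) _InterlockedExchangeAdd((volatile long *) var, 0);
    }

    uint32_t kit_atomic_exchange_explicit_32(volatile uint32_t *var,
                                             uint32_t value,
                                             int memory_order) {
      (void) memory_order;
      return (uint32_t) _InterlockedExchange((volatile long *) var,
                                             (long) value);
    }

    uint32_t kit_atomic_fetch_add_explicit_32(volatile uint32_t *var,
                                              uint32_t value,
                                              int memory_order) {
      (void) memory_order;
      return (uint32_t) _InterlockedExchangeAdd((volatile long *) var,
                                                (long) value);
    }
    #endif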
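atomic_exchange_explicit is also enough to build a tiny test-and-set lock, a common way to exercise an exchange primitive. This is an illustrative sketch on top of the same header, not repository code; the acquire/release orderings are what a C11 build would want, while the MSVC fallback is full-barrier either way:

    #include "kit/atomic.h" /* hypothetical include path */
    #include <stdint.h>

    typedef struct {
      ATOMIC(int8_t) busy; /* 0 = unlocked, 1 = locked */
    } spinlock_t;

    static void spin_init(spinlock_t *s) {
      atomic_store_explicit(&s->busy, 0, memory_order_relaxed);
    }

    static void spin_lock(spinlock_t *s) {
      /* Keep swapping 1 in until the previous value was 0. */
      while (atomic_exchange_explicit(&s->busy, 1, memory_order_acquire) != 0) { }
    }

    static void spin_unlock(spinlock_t *s) {
      atomic_store_explicit(&s->busy, 0, memory_order_release);
    }

A real spin loop would usually call thrd_yield() while waiting, much as the concurrency tests above yield between operations.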
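The new mutex.h and condition_variable.h are thin forwarding headers over threads.h, so the C11-style mtx_*/thrd_* calls used by the tests keep working through the narrower includes. A small usage sketch, assuming kit's threads.h also mirrors the C11 cnd_* API (not shown in this diff):

    #include "kit/condition_variable.h" /* hypothetical include paths */
    #include "kit/mutex.h"

    typedef struct {
      mtx_t lock;
      cnd_t done_cv; /* assumed: cnd_* mirrors C11 <threads.h> */
      int   done;
    } job_t;

    /* Caller is expected to run mtx_init(&job->lock, mtx_plain),
       cnd_init(&job->done_cv) and set job->done = 0 before calling
       thrd_create(&t, worker, job). */

    static int worker(void *p) {
      job_t *job = (job_t *) p;
      /* ... do the actual work here ... */
      mtx_lock(&job->lock);
      job->done = 1;
      cnd_signal(&job->done_cv);
      mtx_unlock(&job->lock);
      return 0;
    }

    static void wait_for(job_t *job) {
      mtx_lock(&job->lock);
      while (!job->done) /* guard against spurious wakeups */
        cnd_wait(&job->done_cv, &job->lock);
      mtx_unlock(&job->lock);
    }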