#pragma once

#include "CoreTypes.h"
#include "Templates/Invoke.h"
#include "Memory/Alignment.h"
#include "Templates/Function.h"
#include "TypeTraits/TypeTraits.h"
#include "Templates/Noncopyable.h"

#include <atomic>

NAMESPACE_REDCRAFT_BEGIN
NAMESPACE_MODULE_BEGIN(Redcraft)
NAMESPACE_MODULE_BEGIN(Utility)

/**
 * EMemoryOrder specifies how memory accesses, including regular, non-atomic memory accesses,
 * are to be ordered around an atomic operation. Absent any constraints on a multi-core system,
 * when multiple threads simultaneously read and write to several variables, one thread can observe
 * the values change in an order different from the order another thread wrote them. Indeed,
 * the apparent order of changes can even differ among multiple reader threads. Some similar effects
 * can occur even on uniprocessor systems due to compiler transformations allowed by the memory model.
 *
 * @see https://en.cppreference.com/w/cpp/atomic/memory_order
 */
enum class EMemoryOrder : uint8
{
	Relaxed                = static_cast<uint8>(NAMESPACE_STD::memory_order_relaxed),
	Consume                = static_cast<uint8>(NAMESPACE_STD::memory_order_consume),
	Acquire                = static_cast<uint8>(NAMESPACE_STD::memory_order_acquire),
	Release                = static_cast<uint8>(NAMESPACE_STD::memory_order_release),
	AcquireRelease         = static_cast<uint8>(NAMESPACE_STD::memory_order_acq_rel),
	SequentiallyConsistent = static_cast<uint8>(NAMESPACE_STD::memory_order_seq_cst),
};

#if BUILD_DEBUG

NAMESPACE_PRIVATE_BEGIN

FORCEINLINE void MemoryOrderCheck(EMemoryOrder Order, uint8 Require)
{
	switch (Order)
	{
	case EMemoryOrder::Relaxed:                checkf((Require) & 0x01, TEXT("Invalid memory order.")); break;
	case EMemoryOrder::Consume:                checkf((Require) & 0x02, TEXT("Invalid memory order.")); break;
	case EMemoryOrder::Acquire:                checkf((Require) & 0x04, TEXT("Invalid memory order.")); break;
	case EMemoryOrder::Release:                checkf((Require) & 0x08, TEXT("Invalid memory order.")); break;
	case EMemoryOrder::AcquireRelease:         checkf((Require) & 0x10, TEXT("Invalid memory order.")); break;
	case EMemoryOrder::SequentiallyConsistent: checkf((Require) & 0x20, TEXT("Invalid memory order.")); break;
	default: check_no_entry();
	}
}

NAMESPACE_PRIVATE_END

#define MEMORY_ORDER_CHECK(Order, Require) NAMESPACE_PRIVATE::MemoryOrderCheck(Order, Require)

#else

#define MEMORY_ORDER_CHECK(Order, Require)

#endif
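// A minimal usage sketch (illustrative only, not part of this header's API) of EMemoryOrder with
// the TAtomic type defined below: release/acquire message passing between two threads. The
// variable names are hypothetical.
//
//	int32         Payload = 0;
//	TAtomic<bool> bReady  = false;
//
//	// Producer thread.
//	Payload = 42;
//	bReady.Store(true, EMemoryOrder::Release);
//
//	// Consumer thread.
//	while (!bReady.Load(EMemoryOrder::Acquire));
//	check(Payload == 42); // The release/acquire pair makes the write to Payload visible here.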
NAMESPACE_PRIVATE_BEGIN

template <typename T, bool bIsRef>
struct TAtomicImpl : FSingleton
{
protected:

	using NativeAtomicType = TConditional<bIsRef, NAMESPACE_STD::atomic_ref<T>, NAMESPACE_STD::atomic<T>>;

public:

	using ValueType = T;

	/** Indicates that the type is always lock-free. */
	static constexpr bool bIsAlwaysLockFree = NativeAtomicType::is_always_lock_free;

	/** Indicates the required alignment of an object to be referenced by TAtomicRef. */
	static constexpr size_t RequiredAlignment = NAMESPACE_STD::atomic_ref<T>::required_alignment;

	/** Constructs an atomic object. */
	FORCEINLINE constexpr TAtomicImpl()                  requires (!bIsRef) : NativeAtomic()        { };
	FORCEINLINE constexpr TAtomicImpl(ValueType Desired) requires (!bIsRef) : NativeAtomic(Desired) { };

	/** Constructs an atomic reference. */
	FORCEINLINE explicit TAtomicImpl(ValueType& Desired) requires (bIsRef) : NativeAtomic(Desired)              { check(Memory::IsAligned(&Desired, RequiredAlignment)); };
	FORCEINLINE TAtomicImpl(TAtomicImpl& InValue)        requires (bIsRef) : NativeAtomic(InValue.NativeAtomic) { };

	/** Stores a value into an atomic object. */
	FORCEINLINE ValueType operator=(ValueType Desired)                                       { return NativeAtomic = Desired; }
	FORCEINLINE ValueType operator=(ValueType Desired) volatile requires (bIsAlwaysLockFree) { return NativeAtomic = Desired; }

	/** Atomically replaces the value of the atomic object with a non-atomic argument. */
	FORCEINLINE void Store(ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)                                       { MEMORY_ORDER_CHECK(Order, 0x01 | 0x08 | 0x20); NativeAtomic.store(Desired, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE void Store(ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (bIsAlwaysLockFree) { MEMORY_ORDER_CHECK(Order, 0x01 | 0x08 | 0x20); NativeAtomic.store(Desired, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically obtains the value of the atomic object. */
	NODISCARD FORCEINLINE ValueType Load(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const                                        { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); return NativeAtomic.load(static_cast<NAMESPACE_STD::memory_order>(Order)); }
	NODISCARD FORCEINLINE ValueType Load(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const volatile requires (bIsAlwaysLockFree) { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); return NativeAtomic.load(static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Loads a value from an atomic object. */
	NODISCARD FORCEINLINE operator ValueType() const                                       { return static_cast<ValueType>(NativeAtomic); }
	NODISCARD FORCEINLINE operator ValueType() const volatile requires (bIsAlwaysLockFree) { return static_cast<ValueType>(NativeAtomic); }

	/** Atomically replaces the value of the atomic object and obtains the value held previously. */
	NODISCARD FORCEINLINE ValueType Exchange(ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)                                       { return NativeAtomic.exchange(Desired, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	NODISCARD FORCEINLINE ValueType Exchange(ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (bIsAlwaysLockFree) { return NativeAtomic.exchange(Desired, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically compares the value of the atomic object with a non-atomic argument and performs atomic exchange if equal or atomic load if not. */
	NODISCARD FORCEINLINE bool CompareExchange(ValueType& Expected, ValueType Desired, EMemoryOrder Success, EMemoryOrder Failure, bool bIsWeak = false)
	{
		MEMORY_ORDER_CHECK(Failure, 0x01 | 0x02 | 0x04 | 0x20);

		if (bIsWeak) return NativeAtomic.compare_exchange_weak  (Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Success), static_cast<NAMESPACE_STD::memory_order>(Failure));
		else         return NativeAtomic.compare_exchange_strong(Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Success), static_cast<NAMESPACE_STD::memory_order>(Failure));
	}

	/** Atomically compares the value of the atomic object with a non-atomic argument and performs atomic exchange if equal or atomic load if not. */
	NODISCARD FORCEINLINE bool CompareExchange(ValueType& Expected, ValueType Desired, EMemoryOrder Success, EMemoryOrder Failure, bool bIsWeak = false) volatile requires (bIsAlwaysLockFree)
	{
		MEMORY_ORDER_CHECK(Failure, 0x01 | 0x02 | 0x04 | 0x20);

		if (bIsWeak) return NativeAtomic.compare_exchange_weak  (Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Success), static_cast<NAMESPACE_STD::memory_order>(Failure));
		else         return NativeAtomic.compare_exchange_strong(Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Success), static_cast<NAMESPACE_STD::memory_order>(Failure));
	}
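	// A hedged usage sketch of CompareExchange (illustrative only): a classic compare-and-swap
	// loop that atomically doubles a counter. The exchange uses acquire-release ordering on
	// success and only a relaxed reload of 'Expected' on failure; names are hypothetical.
	//
	//	TAtomic<int32> Counter = 1;
	//
	//	int32 Expected = Counter.Load(EMemoryOrder::Relaxed);
	//	while (!Counter.CompareExchange(Expected, Expected * 2, EMemoryOrder::AcquireRelease, EMemoryOrder::Relaxed, true));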
	/** Atomically compares the value of the atomic object with a non-atomic argument and performs atomic exchange if equal or atomic load if not. */
	NODISCARD FORCEINLINE bool CompareExchange(ValueType& Expected, ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent, bool bIsWeak = false)
	{
		if (bIsWeak) return NativeAtomic.compare_exchange_weak  (Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Order));
		else         return NativeAtomic.compare_exchange_strong(Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Order));
	}

	/** Atomically compares the value of the atomic object with a non-atomic argument and performs atomic exchange if equal or atomic load if not. */
	NODISCARD FORCEINLINE bool CompareExchange(ValueType& Expected, ValueType Desired, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent, bool bIsWeak = false) volatile requires (bIsAlwaysLockFree)
	{
		if (bIsWeak) return NativeAtomic.compare_exchange_weak  (Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Order));
		else         return NativeAtomic.compare_exchange_strong(Expected, Desired, static_cast<NAMESPACE_STD::memory_order>(Order));
	}

	/** Blocks the thread until notified and the atomic value changes. */
	FORCEINLINE void Wait(ValueType Old, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)          { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); NativeAtomic.wait(Old, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE void Wait(ValueType Old, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); NativeAtomic.wait(Old, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Notifies at least one or all threads blocked waiting on the atomic object. */
	FORCEINLINE void Notify(bool bIsAll = false)          { if (bIsAll) NativeAtomic.notify_all(); else NativeAtomic.notify_one(); }
	FORCEINLINE void Notify(bool bIsAll = false) volatile { if (bIsAll) NativeAtomic.notify_all(); else NativeAtomic.notify_one(); }

	/** Atomically executes the 'Func' on the value stored in the atomic object and obtains the value held previously. */
	template <typename F> requires (CInvocableResult<ValueType, F, ValueType>)
	FORCEINLINE ValueType FetchFn(F&& Func, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)
	{
		ValueType Temp(Load(EMemoryOrder::Relaxed));

		// A weak compare-exchange is sufficient here because we retry in a loop.
		while (!CompareExchange(Temp, InvokeResult<ValueType>(Forward<F>(Func), Temp), Order, true));

		return Temp;
	}

	/** Atomically executes the 'Func' on the value stored in the atomic object and obtains the value held previously. */
	template <typename F> requires (CInvocableResult<ValueType, F, ValueType> && bIsAlwaysLockFree)
	FORCEINLINE ValueType FetchFn(F&& Func, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile
	{
		ValueType Temp(Load(EMemoryOrder::Relaxed));

		// A weak compare-exchange is sufficient here because we retry in a loop.
		while (!CompareExchange(Temp, InvokeResult<ValueType>(Forward<F>(Func), Temp), Order, true));

		return Temp;
	}

	/** Atomically adds the argument to the value stored in the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchAdd(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T> || CFloatingPoint<T>)                                 { return NativeAtomic.fetch_add(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchAdd(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return NativeAtomic.fetch_add(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
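	// A hedged sketch of FetchFn (illustrative only): an "atomic maximum" built from the generic
	// read-modify-write loop. 'Observed' receives the value held before the update; names are
	// hypothetical.
	//
	//	TAtomic<int32> Highest = 0;
	//
	//	int32 NewSample = 42;
	//	int32 Observed  = Highest.FetchFn([NewSample](int32 Old) { return Old > NewSample ? Old : NewSample; });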
	/** Atomically adds the argument to the value stored in the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchAdd(ptrdiff InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CPointer<T>)                               { return NativeAtomic.fetch_add(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchAdd(ptrdiff InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CPointer<T> && bIsAlwaysLockFree) { return NativeAtomic.fetch_add(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchSub(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T> || CFloatingPoint<T>)                                 { return NativeAtomic.fetch_sub(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchSub(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return NativeAtomic.fetch_sub(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically subtracts the argument from the value stored in the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchSub(ptrdiff InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CPointer<T>)                               { return NativeAtomic.fetch_sub(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchSub(ptrdiff InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CPointer<T> && bIsAlwaysLockFree) { return NativeAtomic.fetch_sub(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically multiplies the value stored in the atomic object by the argument and obtains the value held previously. */
	FORCEINLINE ValueType FetchMul(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T> || CFloatingPoint<T>)                                 { return FetchFn([InValue](ValueType Old) -> ValueType { return Old * InValue; }, Order); }
	FORCEINLINE ValueType FetchMul(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return FetchFn([InValue](ValueType Old) -> ValueType { return Old * InValue; }, Order); }

	/** Atomically divides the value stored in the atomic object by the argument and obtains the value held previously. */
	FORCEINLINE ValueType FetchDiv(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T> || CFloatingPoint<T>)                                 { return FetchFn([InValue](ValueType Old) -> ValueType { return Old / InValue; }, Order); }
	FORCEINLINE ValueType FetchDiv(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return FetchFn([InValue](ValueType Old) -> ValueType { return Old / InValue; }, Order); }

	/** Atomically takes the value stored in the atomic object modulo the argument and obtains the value held previously. */
	FORCEINLINE ValueType FetchMod(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return FetchFn([InValue](ValueType Old) -> ValueType { return Old % InValue; }, Order); }
	FORCEINLINE ValueType FetchMod(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchFn([InValue](ValueType Old) -> ValueType { return Old % InValue; }, Order); }
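	// A hedged sketch of FetchAdd/FetchSub (illustrative only): an intrusive reference count.
	// Increments can be relaxed, while the decrement that may drop the last reference uses
	// acquire-release ordering so the releasing thread's writes are visible before destruction.
	//
	//	TAtomic<int32> RefCount = 1;
	//
	//	RefCount.FetchAdd(1, EMemoryOrder::Relaxed);                  // AddRef.
	//	if (RefCount.FetchSub(1, EMemoryOrder::AcquireRelease) == 1)  // Release.
	//	{
	//		// Last reference released; safe to destroy the owned object here.
	//	}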
	/** Atomically performs bitwise AND between the argument and the value of the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchAnd(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return NativeAtomic.fetch_and(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchAnd(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic.fetch_and(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically performs bitwise OR between the argument and the value of the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchOr(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return NativeAtomic.fetch_or(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchOr(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic.fetch_or(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically performs bitwise XOR between the argument and the value of the atomic object and obtains the value held previously. */
	FORCEINLINE ValueType FetchXor(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return NativeAtomic.fetch_xor(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE ValueType FetchXor(ValueType InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic.fetch_xor(InValue, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically performs a bitwise left shift of the value of the atomic object by the argument and obtains the value held previously. */
	FORCEINLINE ValueType FetchLsh(size_t InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return FetchFn([InValue](ValueType Old) -> ValueType { return Old << InValue; }, Order); }
	FORCEINLINE ValueType FetchLsh(size_t InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchFn([InValue](ValueType Old) -> ValueType { return Old << InValue; }, Order); }

	/** Atomically performs a bitwise right shift of the value of the atomic object by the argument and obtains the value held previously. */
	FORCEINLINE ValueType FetchRsh(size_t InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) requires (CIntegral<T>)                               { return FetchFn([InValue](ValueType Old) -> ValueType { return Old >> InValue; }, Order); }
	FORCEINLINE ValueType FetchRsh(size_t InValue, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchFn([InValue](ValueType Old) -> ValueType { return Old >> InValue; }, Order); }

	/** Increments the atomic value by one and obtains the resulting value. */
	FORCEINLINE ValueType operator++()          requires (CIntegral<T> || CPointer<T>)                        { return ++NativeAtomic; }
	FORCEINLINE ValueType operator++() volatile requires ((CIntegral<T> || CPointer<T>) && bIsAlwaysLockFree) { return ++NativeAtomic; }

	/** Increments the atomic value by one and obtains the value held previously. */
	FORCEINLINE ValueType operator++(int)          requires (CIntegral<T> || CPointer<T>)                        { return NativeAtomic++; }
	FORCEINLINE ValueType operator++(int) volatile requires ((CIntegral<T> || CPointer<T>) && bIsAlwaysLockFree) { return NativeAtomic++; }

	/** Decrements the atomic value by one and obtains the resulting value. */
	FORCEINLINE ValueType operator--()          requires (CIntegral<T> || CPointer<T>)                        { return --NativeAtomic; }
	FORCEINLINE ValueType operator--() volatile requires ((CIntegral<T> || CPointer<T>) && bIsAlwaysLockFree) { return --NativeAtomic; }
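	// A hedged sketch of the bitwise fetch operations (illustrative only): maintaining a set of
	// status bits. FetchOr sets bits and FetchAnd with the complement clears them; both return
	// the bits held before the modification. Names are hypothetical.
	//
	//	TAtomic<uint32> StatusBits = 0;
	//
	//	constexpr uint32 FlagDirty = 1u << 0;
	//
	//	uint32 OldBits = StatusBits.FetchOr(FlagDirty, EMemoryOrder::AcquireRelease);  // Set the bit.
	//	StatusBits.FetchAnd(~FlagDirty, EMemoryOrder::AcquireRelease);                 // Clear the bit.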
	/** Decrements the atomic value by one and obtains the value held previously. */
	FORCEINLINE ValueType operator--(int)          requires (CIntegral<T> || CPointer<T>)                        { return NativeAtomic--; }
	FORCEINLINE ValueType operator--(int) volatile requires ((CIntegral<T> || CPointer<T>) && bIsAlwaysLockFree) { return NativeAtomic--; }

	/** Adds the argument to the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator+=(ValueType InValue)          requires (CIntegral<T> || CFloatingPoint<T>)                        { return NativeAtomic += InValue; }
	FORCEINLINE ValueType operator+=(ValueType InValue) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return NativeAtomic += InValue; }

	/** Adds the argument to the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator+=(ptrdiff InValue)          requires (CPointer<T>)                        { return NativeAtomic += InValue; }
	FORCEINLINE ValueType operator+=(ptrdiff InValue) volatile requires (CPointer<T> && bIsAlwaysLockFree) { return NativeAtomic += InValue; }

	/** Subtracts the argument from the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator-=(ValueType InValue)          requires (CIntegral<T> || CFloatingPoint<T>)                        { return NativeAtomic -= InValue; }
	FORCEINLINE ValueType operator-=(ValueType InValue) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return NativeAtomic -= InValue; }

	/** Subtracts the argument from the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator-=(ptrdiff InValue)          requires (CPointer<T>)                        { return NativeAtomic -= InValue; }
	FORCEINLINE ValueType operator-=(ptrdiff InValue) volatile requires (CPointer<T> && bIsAlwaysLockFree) { return NativeAtomic -= InValue; }

	/** Multiplies the atomic value by the argument and obtains the resulting value. */
	FORCEINLINE ValueType operator*=(ValueType InValue)          requires (CIntegral<T> || CFloatingPoint<T>)                        { return FetchMul(InValue) * InValue; }
	FORCEINLINE ValueType operator*=(ValueType InValue) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return FetchMul(InValue) * InValue; }

	/** Divides the atomic value by the argument and obtains the resulting value. */
	FORCEINLINE ValueType operator/=(ValueType InValue)          requires (CIntegral<T> || CFloatingPoint<T>)                        { return FetchDiv(InValue) / InValue; }
	FORCEINLINE ValueType operator/=(ValueType InValue) volatile requires ((CIntegral<T> || CFloatingPoint<T>) && bIsAlwaysLockFree) { return FetchDiv(InValue) / InValue; }

	/** Takes the atomic value modulo the argument and obtains the resulting value. */
	FORCEINLINE ValueType operator%=(ValueType InValue)          requires (CIntegral<T>)                        { return FetchMod(InValue) % InValue; }
	FORCEINLINE ValueType operator%=(ValueType InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchMod(InValue) % InValue; }

	/** Performs bitwise AND with the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator&=(ValueType InValue)          requires (CIntegral<T>)                        { return NativeAtomic &= InValue; }
	FORCEINLINE ValueType operator&=(ValueType InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic &= InValue; }

	/** Performs bitwise OR with the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator|=(ValueType InValue)          requires (CIntegral<T>)                        { return NativeAtomic |= InValue; }
	FORCEINLINE ValueType operator|=(ValueType InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic |= InValue; }

	/** Performs bitwise XOR with the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator^=(ValueType InValue)          requires (CIntegral<T>)                        { return NativeAtomic ^= InValue; }
	FORCEINLINE ValueType operator^=(ValueType InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return NativeAtomic ^= InValue; }
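	// A hedged sketch of the compound assignment operators (illustrative only). Each operator
	// performs the update atomically and yields the resulting value; operator*=, operator/= and
	// operator%= are emulated with a compare-exchange loop via FetchMul/FetchDiv/FetchMod.
	//
	//	TAtomic<int32> Value = 6;
	//
	//	check((Value += 4) == 10);
	//	check((Value *= 2) == 20);
	//	check((Value %= 7) ==  6);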
	/** Performs a bitwise left shift of the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator<<=(size_t InValue)          requires (CIntegral<T>)                        { return FetchLsh(InValue) << InValue; }
	FORCEINLINE ValueType operator<<=(size_t InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchLsh(InValue) << InValue; }

	/** Performs a bitwise right shift of the atomic value and obtains the resulting value. */
	FORCEINLINE ValueType operator>>=(size_t InValue)          requires (CIntegral<T>)                        { return FetchRsh(InValue) >> InValue; }
	FORCEINLINE ValueType operator>>=(size_t InValue) volatile requires (CIntegral<T> && bIsAlwaysLockFree) { return FetchRsh(InValue) >> InValue; }

protected:

	NativeAtomicType NativeAtomic;

};

NAMESPACE_PRIVATE_END

template <typename T> requires (CTriviallyCopyable<T>
	&& CCopyConstructible<T> && CMoveConstructible<T>
	&& CCopyAssignable<T>    && CMoveAssignable<T>)
struct TAtomic final : STRONG_INHERIT(NAMESPACE_PRIVATE::TAtomicImpl<T, false>);

template <typename T> requires (CTriviallyCopyable<T>)
struct TAtomicRef final : STRONG_INHERIT(NAMESPACE_PRIVATE::TAtomicImpl<T, true>);

template <typename T> TAtomic(T)     -> TAtomic<T>;
template <typename T> TAtomicRef(T&) -> TAtomicRef<T>;

static_assert(sizeof(TAtomic<int32>) == sizeof(int32), "The byte size of TAtomic is unexpected");
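// A hedged usage sketch of TAtomic and TAtomicRef (illustrative only). TAtomic owns its value,
// while TAtomicRef provides atomic access to an existing, suitably aligned object; all other
// accesses to that object should also go through an atomic reference while one is in use.
// Names are hypothetical.
//
//	TAtomic Counter = 42;          // Deduced as TAtomic<int32> by the deduction guide above.
//	Counter.FetchAdd(1);
//
//	alignas(TAtomicRef<int32>::RequiredAlignment) int32 Shared = 0;
//	TAtomicRef SharedRef(Shared);  // Deduced as TAtomicRef<int32>.
//	SharedRef.Store(7, EMemoryOrder::Release);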
/**
 * FAtomicFlag is an atomic boolean type. Unlike all specializations of TAtomic, it is guaranteed to be lock-free.
 * Unlike TAtomic<bool>, FAtomicFlag does not provide load or store operations.
 */
struct FAtomicFlag final : FSingleton
{
public:

	/** Constructs an atomic flag. */
	FORCEINLINE constexpr FAtomicFlag() : NativeAtomic() { };

	/** Atomically sets the flag to false. */
	FORCEINLINE void Clear(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)          { MEMORY_ORDER_CHECK(Order, 0x01 | 0x08 | 0x20); NativeAtomic.clear(static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE void Clear(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile { MEMORY_ORDER_CHECK(Order, 0x01 | 0x08 | 0x20); NativeAtomic.clear(static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically sets the flag to true and obtains its previous value. */
	FORCEINLINE bool TestAndSet(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent)          { return NativeAtomic.test_and_set(static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE bool TestAndSet(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) volatile { return NativeAtomic.test_and_set(static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Atomically returns the value of the flag. */
	NODISCARD FORCEINLINE bool Test(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const          { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); return NativeAtomic.test(static_cast<NAMESPACE_STD::memory_order>(Order)); }
	NODISCARD FORCEINLINE bool Test(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const volatile { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); return NativeAtomic.test(static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Blocks the thread until notified and the atomic value changes. */
	FORCEINLINE void Wait(bool Old, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const          { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).wait(Old, static_cast<NAMESPACE_STD::memory_order>(Order)); }
	FORCEINLINE void Wait(bool Old, EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) const volatile { MEMORY_ORDER_CHECK(Order, 0x01 | 0x02 | 0x04 | 0x20); const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).wait(Old, static_cast<NAMESPACE_STD::memory_order>(Order)); }

	/** Notifies at least one or all threads blocked waiting on the atomic object. */
	FORCEINLINE void Notify(bool bIsAll = false)          { if (bIsAll) const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).notify_all(); else const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).notify_one(); }
	FORCEINLINE void Notify(bool bIsAll = false) volatile { if (bIsAll) const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).notify_all(); else const_cast<NAMESPACE_STD::atomic_flag&>(NativeAtomic).notify_one(); }

private:

	NAMESPACE_STD::atomic_flag NativeAtomic;

};

/** Informs the compiler that the dependency tree started by a consume load does not extend past the return value. */
template <typename T>
NODISCARD inline T KillDependency(T InValue)
{
	T Temp(InValue);
	return Temp;
}

/** Establishes memory synchronization ordering of non-atomic and relaxed atomic accesses without an associated atomic operation. */
extern "C" FORCEINLINE void AtomicThreadFence(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) { NAMESPACE_STD::atomic_thread_fence(static_cast<NAMESPACE_STD::memory_order>(Order)); }

/** Establishes memory synchronization ordering between a thread and a signal handler executed on the same thread. */
extern "C" FORCEINLINE void AtomicSignalFence(EMemoryOrder Order = EMemoryOrder::SequentiallyConsistent) { NAMESPACE_STD::atomic_signal_fence(static_cast<NAMESPACE_STD::memory_order>(Order)); }

#undef MEMORY_ORDER_CHECK

NAMESPACE_MODULE_END(Utility)
NAMESPACE_MODULE_END(Redcraft)
NAMESPACE_REDCRAFT_END
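// A hedged usage sketch of FAtomicFlag (illustrative only): a simple spinlock. TestAndSet
// acquires the lock, Clear releases it, and Wait/Notify park and wake contending threads
// instead of spinning indefinitely. Names are hypothetical.
//
//	FAtomicFlag Lock;
//
//	// Lock.
//	while (Lock.TestAndSet(EMemoryOrder::Acquire)) Lock.Wait(true, EMemoryOrder::Relaxed);
//
//	// Unlock.
//	Lock.Clear(EMemoryOrder::Release);
//	Lock.Notify();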