#ifndef Atomics_h
#define Atomics_h
#include "wtf/Assertions.h"
#include "wtf/CPU.h"
#include <stdint.h>
#if COMPILER(MSVC)
#include <windows.h>
#endif
#if defined(THREAD_SANITIZER)
#include <sanitizer/tsan_interface_atomic.h>
#endif
namespace WTF {
#if COMPILER(MSVC)
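// atomicAdd returns the result of the addition.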
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment)
{
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment;
}
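// atomicSubtract returns the result of the subtraction.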
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement)
{
return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement;
}
ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }
ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }
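// atomicTestAndSetToOne sets *ptr to 1 and returns the previous value (0 or 1).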
ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
{
int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
ASSERT(!ret || ret == 1);
return ret;
}
ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
{
ASSERT(*ptr == 1);
InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
}
#else
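// The __sync_*_and_fetch builtins return the updated value and act as full memory barriers.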
ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); }
ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); }
ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); }
ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); }
ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
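// __sync_lock_test_and_set is only an acquire barrier and __sync_lock_release is only a
// release barrier, which is exactly the pairing a lock acquire/release needs.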
ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
{
int ret = __sync_lock_test_and_set(ptr, 1);
ASSERT(!ret || ret == 1);
return ret;
}
ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
{
ASSERT(*ptr == 1);
__sync_lock_release(ptr);
}
#endif
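// Together, atomicTestAndSetToOne and atomicSetOneToZero can back a simple spin
// lock. A minimal illustrative sketch (spinLock, spinUnlock, and gLock are
// example names, not part of this header):
//
//   static int volatile gLock = 0;
//
//   void spinLock() { while (atomicTestAndSetToOne(&gLock)) { } }
//   void spinUnlock() { atomicSetOneToZero(&gLock); }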
#if defined(THREAD_SANITIZER)
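// Under ThreadSanitizer, route the release store and acquire load through the
// TSan atomic interface so the race detector understands the pairing.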
ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
{
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
{
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
#else
#if CPU(X86) || CPU(X86_64)
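// On x86 and x86-64, stores already have release semantics and loads already
// have acquire semantics at the hardware level, so only a compiler barrier is needed.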
#if COMPILER(MSVC)
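// Starting with Visual Studio 2005, the compiler guarantees acquire and release
// semantics for accesses to volatile variables, so no barrier is emitted at all.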
#define MEMORY_BARRIER()
#else
#define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory")
#endif
#elif CPU(ARM) && (OS(LINUX) || OS(ANDROID))
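// On ARM, __sync_synchronize generates a dmb instruction, which is expensive on
// single-core devices that don't need it. Avoid the cost by calling the
// kernel-provided kuser_memory_barrier helper at the fixed address 0xffff0fa0.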
inline void memoryBarrier()
{
typedef void (*KernelMemoryBarrierFunc)();
((KernelMemoryBarrierFunc)0xffff0fa0)();
}
#define MEMORY_BARRIER() memoryBarrier()
#else
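// Other architectures: fall back to a full hardware barrier.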
#define MEMORY_BARRIER() __sync_synchronize()
#endif
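// releaseStore guarantees that every write preceding it is visible to a thread
// that observes the stored value via acquireLoad; acquireLoad prevents
// subsequent accesses from being reordered before the load.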
ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
{
MEMORY_BARRIER();
*ptr = value;
}
ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
{
int value = *ptr;
MEMORY_BARRIER();
return value;
}
#undef MEMORY_BARRIER
#endif
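// An illustrative use of the release/acquire pair to publish data to another
// thread (dataReady and sharedData are hypothetical names):
//
//   // Producer:
//   sharedData = computeResult();
//   releaseStore(&dataReady, 1); // sharedData is visible before dataReady is.
//
//   // Consumer:
//   if (acquireLoad(&dataReady))
//       use(sharedData); // Guaranteed to see the producer's write.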
} // namespace WTF
using WTF::atomicAdd;
using WTF::atomicSubtract;
using WTF::atomicDecrement;
using WTF::atomicIncrement;
using WTF::atomicTestAndSetToOne;
using WTF::atomicSetOneToZero;
using WTF::acquireLoad;
using WTF::releaseStore;
#endif // Atomics_h