Faster atomic loads and stores on windows
This commit is contained in:
parent
b04f2a0f01
commit
c60095a818
@ -61,19 +61,43 @@ static inline int64 my_atomic_add64(int64 volatile *a, int64 v)
|
|||||||
return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
|
return (int64)InterlockedExchangeAdd64((volatile LONGLONG*)a, (LONGLONG)v);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/*
  According to MSDN:

    Simple reads and writes to properly-aligned 32-bit variables are atomic
    operations.
    ...
    Simple reads and writes to properly aligned 64-bit variables are atomic on
    64-bit Windows. Reads and writes to 64-bit values are not guaranteed to be
    atomic on 32-bit Windows.

  https://msdn.microsoft.com/en-us/library/windows/desktop/ms684122(v=vs.85).aspx
*/
/*
  Atomically load a 32-bit value.

  A simple read of a properly-aligned 32-bit variable is atomic on Windows
  (per the MSDN guarantee cited in this file), so no interlocked operation
  is required. The MemoryBarrier() after the read keeps later memory
  accesses from being reordered before the load.

  @param a  pointer to the (assumed properly aligned) variable to read
  @return   the value read
*/
static inline int32 my_atomic_load32(int32 volatile *a)
{
  int32 value= *a;
  MemoryBarrier();
  return value;
}
|
||||||
|
|
||||||
/*
  Atomically load a 64-bit value.

  On 64-bit Windows (_M_X64) a simple read of a properly-aligned 64-bit
  variable is atomic (per the MSDN guarantee cited in this file), so a
  plain load followed by MemoryBarrier() suffices. On 32-bit Windows a
  64-bit read is NOT guaranteed atomic, so fall back to an interlocked
  compare-exchange with identical comparand and exchange values (0, 0),
  which reads the variable atomically without changing it.

  @param a  pointer to the (assumed properly aligned) variable to read
  @return   the value read
*/
static inline int64 my_atomic_load64(int64 volatile *a)
{
#ifdef _M_X64
  int64 value= *a;
  MemoryBarrier();
  return value;
#else
  return (int64) InterlockedCompareExchange64((volatile LONGLONG *) a, 0, 0);
#endif
}
|
||||||
|
|
||||||
/*
  Atomically load a pointer value.

  A pointer is machine-word sized, so a simple aligned read is atomic on
  Windows (32-bit pointer on 32-bit Windows, 64-bit pointer on 64-bit
  Windows — both covered by the MSDN guarantee cited in this file). The
  MemoryBarrier() after the read keeps later memory accesses from being
  reordered before the load.

  @param a  pointer to the (assumed properly aligned) pointer variable
  @return   the pointer value read
*/
static inline void* my_atomic_loadptr(void * volatile *a)
{
  void *value= *a;
  MemoryBarrier();
  return value;
}
|
||||||
|
|
||||||
static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
|
static inline int32 my_atomic_fas32(int32 volatile *a, int32 v)
|
||||||
@ -93,17 +117,24 @@ static inline void * my_atomic_fasptr(void * volatile *a, void * v)
|
|||||||
|
|
||||||
/*
  Atomically store a 32-bit value.

  A simple write to a properly-aligned 32-bit variable is atomic on
  Windows (per the MSDN guarantee cited in this file), so the previous
  InterlockedExchange is unnecessary. The MemoryBarrier() before the
  write keeps earlier memory accesses from being reordered past the store.

  @param a  pointer to the (assumed properly aligned) variable to write
  @param v  value to store
*/
static inline void my_atomic_store32(int32 volatile *a, int32 v)
{
  MemoryBarrier();
  *a= v;
}
|
||||||
|
|
||||||
/*
  Atomically store a 64-bit value.

  On 64-bit Windows (_M_X64) a simple write to a properly-aligned 64-bit
  variable is atomic (per the MSDN guarantee cited in this file), so a
  MemoryBarrier() followed by a plain store suffices. On 32-bit Windows
  a 64-bit write is NOT guaranteed atomic, so fall back to
  InterlockedExchange64, discarding the previous value.

  @param a  pointer to the (assumed properly aligned) variable to write
  @param v  value to store
*/
static inline void my_atomic_store64(int64 volatile *a, int64 v)
{
#ifdef _M_X64
  MemoryBarrier();
  *a= v;
#else
  (void) InterlockedExchange64((volatile LONGLONG *) a, v);
#endif
}
|
||||||
|
|
||||||
/*
  Atomically store a pointer value.

  A pointer is machine-word sized, so a simple aligned write is atomic on
  Windows (per the MSDN guarantee cited in this file), and the previous
  InterlockedExchangePointer is unnecessary. The MemoryBarrier() before
  the write keeps earlier memory accesses from being reordered past the
  store.

  @param a  pointer to the (assumed properly aligned) pointer variable
  @param v  pointer value to store
*/
static inline void my_atomic_storeptr(void * volatile *a, void *v)
{
  MemoryBarrier();
  *a= v;
}
|
||||||
|
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user