@@ -125,8 +125,8 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
+// memory access.
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
@@ -149,17 +149,17 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-inline void MemoryBarrier() {
+inline void MemoryBarrierInternal() {
   __asm__ __volatile__("sync" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }
 
@@ -169,12 +169,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
 
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }
 
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }
 
@@ -247,9 +247,9 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
 
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }
 
@@ -257,20 +257,20 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
+// semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no
+// memory access.
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return res;
 }
 
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
@@ -280,11 +280,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
 
 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  MemoryBarrierInternal();
 }
 
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   *ptr = value;
 }
 
@@ -294,12 +294,12 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
 
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return value;
 }
 
 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  MemoryBarrierInternal();
   return *ptr;
 }
 #endif
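
Editorial note (not part of the patch): the "Acquire"/"Release" comment block touched above describes how these primitives are meant to be paired. A minimal usage sketch, assuming the Atomic32 operations declared in this header; the Produce/Consume helpers and the payload/ready variables are hypothetical, for illustration only:

  // Hypothetical publisher/consumer handoff built on the header's primitives.
  static Atomic32 payload = 0;  // data being handed off
  static Atomic32 ready = 0;    // publication flag

  void Produce(Atomic32 v) {
    NoBarrier_Store(&payload, v);  // plain store of the data
    Release_Store(&ready, 1);      // "Release": the payload store above cannot
                                   // be reordered after this flag store
  }

  Atomic32 Consume() {
    while (Acquire_Load(&ready) == 0) {
      // spin; "Acquire": the payload load below cannot be reordered
      // ahead of this flag load
    }
    return NoBarrier_Load(&payload);  // sees the value written by Produce()
  }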
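The renamed MemoryBarrierInternal() is unchanged in behavior: an inline-asm "sync" instruction (a full hardware barrier) plus a "memory" clobber acting as a compiler barrier, ordering all earlier memory accesses before all later ones without itself touching memory. A rough portable C++11 analogue, offered only as an illustration (this header does not use <atomic>):

  #include <atomic>

  // Full two-way fence: nothing before it may be reordered after it, and
  // nothing after it may be reordered before it. Performs no memory access.
  inline void FullBarrier() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }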