parent cb6dd4ef5f
commit f5de486b53
13 changed files with 1660 additions and 0 deletions
atomicops.h
@@ -0,0 +1,179 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
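
As a quick illustration of the guidance above (a hedged sketch; g_flag and the
two functions are hypothetical, not part of this change), prefer the Load and
Store routines over raw assignment:

  Atomic32 g_flag = 0;

  void Wrong() {
    g_flag = 1;  // direct assignment to an atomic variable: incorrect usage
  }

  void Right() {
    NoBarrier_Store(&g_flag, 1);             // explicit, greppable, correct
    Atomic32 seen = NoBarrier_Load(&g_flag);
    (void)seen;
  }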

// This header and the implementations for each platform (located in
// atomicops_internals_*) must be kept in sync with the upstream code (V8).

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_H_

// Don't include this file for people not concerned about thread safety.
#ifndef GOOGLE_PROTOBUF_NO_THREADSAFETY

#include <google/protobuf/stubs/platform_macros.h>

namespace google {
namespace protobuf {
namespace internal {

typedef int32 Atomic32;
#ifdef GOOGLE_PROTOBUF_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64 Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
// a store with appropriate memory-ordering instructions.  "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
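
To make the Acquire/Release contract concrete, here is a minimal spinlock
sketch built only from the routines declared above (illustrative, not part of
this change; a real lock would also want backoff or a yield in the spin):

  class SpinLock {
   public:
    SpinLock() : lock_(0) {}
    void Lock() {
      // Acquire semantics: the critical section cannot be reordered
      // ahead of the successful compare-and-swap.
      while (Acquire_CompareAndSwap(&lock_, 0, 1) != 0) {
        // Spin until the lock is observed free and won.
      }
    }
    void Unlock() {
      // Release semantics: writes made inside the critical section
      // cannot be reordered after this store.
      Release_Store(&lock_, 0);
    }
   private:
    Atomic32 lock_;
  };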

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef GOOGLE_PROTOBUF_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // GOOGLE_PROTOBUF_HOST_ARCH_64_BIT

}  // namespace internal
}  // namespace protobuf
}  // namespace google

// Include our platform specific implementation.
#if defined(_MSC_VER) && \
    (defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32) || \
     defined(GOOGLE_PROTOBUF_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
      (defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32) || \
       defined(GOOGLE_PROTOBUF_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
      (defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32) || \
       defined(GOOGLE_PROTOBUF_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(GOOGLE_PROTOBUF_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(GOOGLE_PROTOBUF_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // GOOGLE_PROTOBUF_NO_THREADSAFETY

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_H_
atomicops_internals_arm_gcc.h
@@ -0,0 +1,148 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_

namespace google {
namespace protobuf {
namespace internal {

// 0xffff0fc0 is the hard coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard coded)
// by at least glibc-2.7 and the Android C library.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
                                           Atomic32 new_value,
                                           volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
    (LinuxKernelCmpxchgFunc) 0xffff0fc0;

typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
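
The kernel helper's calling convention is implied by the loops below: it
returns 0 when the compare-exchange succeeded and nonzero otherwise. A hedged,
illustrative sketch (TrySetFlag is hypothetical, not part of this change):

  // Set a flag from 0 to 1 via the kernel helper; true if we won the race.
  inline bool TrySetFlag(volatile Atomic32* flag) {
    return pLinuxKernelCmpxchg(0, 1, const_cast<Atomic32*>(flag)) == 0;
  }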


inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value = *ptr;
  do {
    if (!pLinuxKernelCmpxchg(old_value, new_value,
                             const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (pLinuxKernelCmpxchg(old_value, new_value,
                               const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  for (;;) {
    // Atomic exchange the old value with an incremented one.
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    if (pLinuxKernelCmpxchg(old_value, new_value,
                            const_cast<Atomic32*>(ptr)) == 0) {
      // The exchange took place as expected.
      return new_value;
    }
    // Otherwise, *ptr changed mid-loop and we need to retry.
  }
}
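
These increments are the sort of primitive a reference count is built on; a
hedged sketch (Refcounted and Unref are illustrative, not part of this change):

  struct Refcounted {
    Atomic32 refcount;  // number of outstanding references
  };

  // Drop one reference; true when the caller released the last one. The
  // barrier keeps earlier writes to *obj visible before another thread
  // can observe the count hitting zero and free the object.
  inline bool Unref(Refcounted* obj) {
    return Barrier_AtomicIncrement(&obj->refcount, -1) == 0;
  }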

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  pLinuxKernelMemoryBarrier();
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_ARM_GCC_H_
atomicops_internals_mips_gcc.h
@@ -0,0 +1,184 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace google {
namespace protobuf {
namespace internal {

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
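
In C++ terms, the ll/sc loop above behaves like the following sketch; this
model is illustrative only, since written as plain C++ it would not be atomic
(the hardware load-linked/store-conditional pair is what makes the real
version safe):

  Atomic32 CasModel(volatile Atomic32* ptr, Atomic32 old_value,
                    Atomic32 new_value) {
    Atomic32 prev = *ptr;        // "ll": load-linked
    if (prev == old_value) {
      *ptr = new_value;          // "sc": store-conditional; on interference
    }                            //       it fails and the loop retries
    return prev;
  }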

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}

// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %2\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"  // start again on atomic error
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation.  "Barrier" operations have both "Acquire" and "Release"
// semantics.  A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
atomicops_internals_x86_gcc.cc
@@ -0,0 +1,137 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// http://code.google.com/p/protobuf/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This module gets enough CPU information to optimize the
// atomicops module on x86.

#include <cstring>

#include "atomicops.h"

// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file.  If atomicops.h
// doesn't sub-include that file, then we aren't needed, and shouldn't
// try to do anything.
#ifdef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_

// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
// of the global offset table.  To avoid breaking such executables, this code
// must preserve that register's value across cpuid instructions.
#if defined(__i386__)
#define cpuid(a, b, c, d, inp) \
  asm("mov %%ebx, %%edi\n"     \
      "cpuid\n"                \
      "xchg %%edi, %%ebx\n"    \
      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#elif defined(__x86_64__)
#define cpuid(a, b, c, d, inp) \
  asm("mov %%rbx, %%rdi\n"     \
      "cpuid\n"                \
      "xchg %%rdi, %%rbx\n"    \
      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
#endif

#if defined(cpuid)        // initialize the struct only on x86

namespace google {
namespace protobuf {
namespace internal {

// Set the flags so that code will run correctly and conservatively, so even
// if we haven't been initialized yet, we're probably single threaded, and our
// default values should hopefully be pretty safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
  false,          // bug can't exist before process spawns multiple threads
  false,          // no SSE2
};

namespace {

// Initialize the AtomicOps_Internalx86CPUFeatures struct.
void AtomicOps_Internalx86CPUFeaturesInit() {
  uint32_t eax;
  uint32_t ebx;
  uint32_t ecx;
  uint32_t edx;

  // Get vendor string (issue CPUID with eax = 0)
  cpuid(eax, ebx, ecx, edx, 0);
  char vendor[13];
  memcpy(vendor, &ebx, 4);
  memcpy(vendor + 4, &edx, 4);
  memcpy(vendor + 8, &ecx, 4);
  vendor[12] = 0;

  // get feature flags in ecx/edx, and family/model in eax
  cpuid(eax, ebx, ecx, edx, 1);

  int family = (eax >> 8) & 0xf;  // family and model fields
  int model = (eax >> 4) & 0xf;
  if (family == 0xf) {  // use extended family and model fields
    family += (eax >> 20) & 0xff;
    model += ((eax >> 16) & 0xf) << 4;
  }
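
  // Worked example of the decode above, with a hypothetical eax of
  // 0x00020f32 (not taken from real hardware):
  //   base family = (eax >> 8) & 0xf          = 15  -> extended fields apply
  //   base model  = (eax >> 4) & 0xf          = 3
  //   ext. model  = ((eax >> 16) & 0xf) << 4  = 32
  //   so family = 15 and model = 3 + 32 = 35, which lands in the
  //   32..63 range tested below.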

  // Opteron Rev E has a bug in which on very rare occasions a locked
  // instruction doesn't act as a read-acquire barrier if followed by a
  // non-locked read-modify-write instruction.  Rev F has this bug in
  // pre-release versions, but not in versions released to customers,
  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
      family == 15 &&
      32 <= model && model <= 63) {
    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
  } else {
    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
  }

  // edx bit 26 is SSE2 which we use to tell us whether we can use mfence
  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}

class AtomicOpsx86Initializer {
 public:
  AtomicOpsx86Initializer() {
    AtomicOps_Internalx86CPUFeaturesInit();
  }
};

// A global to get us initialized on startup via static initialization :/
AtomicOpsx86Initializer g_initer;

}  // namespace

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // defined(cpuid)

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
atomicops_internals_x86_gcc.h
@@ -0,0 +1,290 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_

namespace google {
namespace protobuf {
namespace internal {

// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86.  Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
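
A compiler barrier like this constrains only the optimizer, not the CPU; a
hedged illustration (Example and the globals are hypothetical, not part of
this change):

  int g_a = 0;
  int g_b = 0;

  void Example() {
    g_a = 1;
    ATOMICOPS_COMPILER_BARRIER();  // compiler may not reorder across this,
    g_b = 1;                       // but it emits no instruction, so the
  }                                // processor itself could still reorder.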

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

#if defined(__x86_64__)

// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);
                          // acts as a barrier on PIII
  }
}
#endif

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
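
The store-release / load-acquire pairing described in these comments is what
makes one-way data publication work; a hedged sketch (the globals and
functions are illustrative, not part of this change):

  Atomic64 g_payload = 0;
  Atomic64 g_ready = 0;

  void Producer() {
    NoBarrier_Store(&g_payload, 42);  // plain store of the data
    Release_Store(&g_ready, 1);       // publish: the store above cannot
  }                                   // sink below this one

  void Consumer() {
    if (Acquire_Load(&g_ready) != 0) {          // loads below cannot hoist
      Atomic64 v = NoBarrier_Load(&g_payload);  // guaranteed to observe 42
      (void)v;
    }
  }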

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_
atomicops_internals_x86_macosx.h
@@ -0,0 +1,304 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace google {
namespace protobuf {
namespace internal {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
// on the Mac, even when they are the same size.  We need to explicitly cast
// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
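
The cast type above lets ordinary AtomicWord code dispatch to whichever width
the platform uses; a hedged sketch (g_counter and Bump are hypothetical, not
part of this change):

  AtomicWord g_counter = 0;

  void Bump() {
    // Resolves to the Atomic32 routine on 32-bit Macs and to the Atomic64
    // routine under __LP64__, via the wrappers defined below.
    NoBarrier_AtomicIncrement(&g_counter, 1);
  }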

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return google::protobuf::internal::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return google::protobuf::internal::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return google::protobuf::internal::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return google::protobuf::internal::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return google::protobuf::internal::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return google::protobuf::internal::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MACOSX_H_
atomicops_internals_x86_msvc.cc
@@ -0,0 +1,106 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The compilation of extension_set.cc fails when windows.h is included.
// Therefore we move the code depending on windows.h to this separate cc file.

// Don't compile this file for people not concerned about thread safety.
#ifndef GOOGLE_PROTOBUF_NO_THREADSAFETY

#include <google/protobuf/stubs/atomicops.h>

#include <windows.h>

namespace google {
namespace protobuf {
namespace internal {

inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}
||||||
|
|
||||||
|
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
||||||
|
Atomic32 old_value, |
||||||
|
Atomic32 new_value) { |
||||||
|
LONG result = InterlockedCompareExchange( |
||||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||||
|
static_cast<LONG>(new_value), |
||||||
|
static_cast<LONG>(old_value)); |
||||||
|
return static_cast<Atomic32>(result); |
||||||
|
} |
||||||
|
|
||||||
|
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
||||||
|
Atomic32 new_value) { |
||||||
|
LONG result = InterlockedExchange( |
||||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||||
|
static_cast<LONG>(new_value)); |
||||||
|
return static_cast<Atomic32>(result); |
||||||
|
} |
||||||
|
|
||||||
|
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
||||||
|
Atomic32 increment) { |
||||||
|
return InterlockedExchangeAdd( |
||||||
|
reinterpret_cast<volatile LONG*>(ptr), |
||||||
|
static_cast<LONG>(increment)) + increment; |
||||||
|
} |
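// Annotation (editor's note, not in the original commit): the Interlocked*
// family returns the value the destination held *before* the operation, and
// each call is a full memory barrier on Windows. InterlockedExchangeAdd thus
// yields the old value, so Barrier_AtomicIncrement adds `increment` back to
// report the new value, matching the portable contract. For example, with
// *ptr == 5 and increment == 2, the call returns 5, the function returns 7,
// and *ptr is left holding 7.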

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                  Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                 Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

#endif  // defined(_WIN64)

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_NO_THREADSAFETY
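Compare-and-swap is the building block for read-modify-write operations that the Interlocked* API does not provide directly. As a sketch under stated assumptions (illustrative only, not part of this commit; AtomicMax is a hypothetical name, and only the declarations from atomicops.h are assumed), an atomic maximum via a CAS retry loop:

#include <google/protobuf/stubs/atomicops.h>

using google::protobuf::internal::Atomic32;
using google::protobuf::internal::NoBarrier_CompareAndSwap;
using google::protobuf::internal::NoBarrier_Load;

Atomic32 AtomicMax(volatile Atomic32* ptr, Atomic32 candidate) {
  Atomic32 old_value;
  do {
    old_value = NoBarrier_Load(ptr);               // snapshot current value
    if (candidate <= old_value) return old_value;  // already at least as big
    // CAS returns the previous contents; a mismatch means another thread
    // changed *ptr between the snapshot and the swap, so retry.
  } while (NoBarrier_CompareAndSwap(ptr, old_value, candidate) != old_value);
  return candidate;
}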
@ -0,0 +1,147 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_

namespace google {
namespace protobuf {
namespace internal {

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_MSVC_H_
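The acquire/release variants in this header map naturally onto a tiny spinlock: an acquire compare-and-swap takes the lock (later accesses cannot move above it) and a release store frees it (earlier accesses cannot move below it). A minimal sketch, illustrative only and not protobuf API (SpinLock is a hypothetical name):

#include <google/protobuf/stubs/atomicops.h>

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Spin until we swap 0 -> 1; the acquire semantics keep the critical
    // section from being reordered above the successful compare-and-swap.
    while (google::protobuf::internal::Acquire_CompareAndSwap(
               &state_, 0, 1) != 0) {
      // Busy-wait. A production lock would back off or yield here.
    }
  }

  void Unlock() {
    // The release semantics keep the critical section from being reordered
    // below this store.
    google::protobuf::internal::Release_Store(&state_, 0);
  }

 private:
  volatile google::protobuf::internal::Atomic32 state_;
};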
@ -0,0 +1,122 @@
// Copyright 2011 the V8 authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
#define GOOGLE_PROTOBUF_PLATFORM_MACROS_H_

#include <google/protobuf/stubs/common.h>

// Processor architecture detection. For more info on what's defined, see:
//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
//   http://www.agner.org/optimize/calling_conventions.pdf
//   or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define GOOGLE_PROTOBUF_HOST_ARCH_X64 1
#define GOOGLE_PROTOBUF_HOST_ARCH_64_BIT 1
#define GOOGLE_PROTOBUF_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define GOOGLE_PROTOBUF_HOST_ARCH_IA32 1
#define GOOGLE_PROTOBUF_HOST_ARCH_32_BIT 1
#define GOOGLE_PROTOBUF_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define GOOGLE_PROTOBUF_HOST_ARCH_ARM 1
#define GOOGLE_PROTOBUF_HOST_ARCH_32_BIT 1
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define GOOGLE_PROTOBUF_HOST_CAN_READ_UNALIGNED 1
#endif
#elif defined(__MIPSEL__)
#define GOOGLE_PROTOBUF_HOST_ARCH_MIPS 1
#define GOOGLE_PROTOBUF_HOST_ARCH_32_BIT 1
#else
#error Host architecture was not detected as supported by v8
#endif

// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !defined(GOOGLE_PROTOBUF_TARGET_ARCH_X64) && \
    !defined(GOOGLE_PROTOBUF_TARGET_ARCH_IA32) && \
    !defined(GOOGLE_PROTOBUF_TARGET_ARCH_ARM) && \
    !defined(GOOGLE_PROTOBUF_TARGET_ARCH_MIPS)
#if defined(_M_X64) || defined(__x86_64__)
#define GOOGLE_PROTOBUF_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
#define GOOGLE_PROTOBUF_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define GOOGLE_PROTOBUF_TARGET_ARCH_ARM 1
#elif defined(__MIPSEL__)
#define GOOGLE_PROTOBUF_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
#endif
#endif

// Check for supported combinations of host and target architectures.
#if defined(GOOGLE_PROTOBUF_TARGET_ARCH_IA32) && \
    !defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32)
#error Target architecture ia32 is only supported on ia32 host
#endif
#if defined(GOOGLE_PROTOBUF_TARGET_ARCH_X64) && \
    !defined(GOOGLE_PROTOBUF_HOST_ARCH_X64)
#error Target architecture x64 is only supported on x64 host
#endif
#if (defined(GOOGLE_PROTOBUF_TARGET_ARCH_ARM) && \
    !(defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32) || \
      defined(GOOGLE_PROTOBUF_HOST_ARCH_ARM)))
#error Target architecture arm is only supported on arm and ia32 host
#endif
#if (defined(GOOGLE_PROTOBUF_TARGET_ARCH_MIPS) && \
    !(defined(GOOGLE_PROTOBUF_HOST_ARCH_IA32) || \
      defined(GOOGLE_PROTOBUF_HOST_ARCH_MIPS)))
#error Target architecture mips is only supported on mips and ia32 host
#endif

// Define unaligned read for the target architectures supporting it.
#if defined(GOOGLE_PROTOBUF_TARGET_ARCH_X64) || \
    defined(GOOGLE_PROTOBUF_TARGET_ARCH_IA32)
#define GOOGLE_PROTOBUF_TARGET_CAN_READ_UNALIGNED 1
#elif GOOGLE_PROTOBUF_TARGET_ARCH_ARM
// Some CPU-OS combinations allow unaligned access on ARM. We assume
// that unaligned accesses are not allowed unless the build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
#if CAN_USE_UNALIGNED_ACCESSES
#define GOOGLE_PROTOBUF_TARGET_CAN_READ_UNALIGNED 1
#endif
#elif GOOGLE_PROTOBUF_TARGET_ARCH_MIPS
#else
#error Target architecture is not supported by v8
#endif

#if (defined(__APPLE__) && defined(__MACH__)) || \
    defined(__FreeBSD__) || defined(__OpenBSD__)
#define GOOGLE_PROTOBUF_USING_BSD_ABI
#endif

#endif  // GOOGLE_PROTOBUF_PLATFORM_MACROS_H_
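Downstream code keys off these macros to pick between a fast unaligned load and a portable fallback. A minimal sketch, illustrative only and not part of this commit (ReadUnaligned32 is a hypothetical helper, and the include path is an assumption based on the header's include guard):

#include <stdint.h>
#include <string.h>

#include <google/protobuf/stubs/platform_macros.h>

inline uint32_t ReadUnaligned32(const char* p) {
#if defined(GOOGLE_PROTOBUF_TARGET_CAN_READ_UNALIGNED)
  // x86/x64 (and ARM builds that opt in) tolerate unaligned loads directly.
  return *reinterpret_cast<const uint32_t*>(p);
#else
  // Portable fallback for targets that fault on unaligned access.
  uint32_t value;
  memcpy(&value, p, sizeof(value));
  return value;
#endif
}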