Merge pull request #69 from apaprocki/aix-atomicops

Add AIX/POWER atomicops and fix compilation with IBM xlC C++ compiler.
Feng Xiao 10 years ago
commit faf581d208
  1. CONTRIBUTORS.txt (3 lines changed)
  2. README.md (8 lines changed)
  3. src/google/protobuf/repeated_field.h (4 lines changed)
  4. src/google/protobuf/stubs/atomicops.h (14 lines changed)
  5. src/google/protobuf/stubs/atomicops_internals_aix.h (440 lines changed)
  6. src/google/protobuf/stubs/platform_macros.h (5 lines changed)
  7. src/google/protobuf/stubs/type_traits_unittest.cc (3 lines changed)
  8. src/google/protobuf/wire_format_lite.cc (9 lines changed)

CONTRIBUTORS.txt
@@ -91,3 +91,6 @@ Patch contributors:
William Orr <will@worrbase.com>
* Fixed detection of sched_yield on Solaris.
* Added atomicops for Solaris
Andrew Paprocki <andrew@ishiboo.com>
* Fixed minor IBM xlC compiler build issues
* Added atomicops for AIX (POWER)

README.md
@@ -127,6 +127,14 @@ For advanced usage information on configure and make, see INSTALL.txt.
Also, you will need to use gmake instead of make.
**Note for AIX users**
Compile using the IBM xlC C++ compiler as follows:
./configure CXX=xlC
Also, you will need to use GNU `make` (`gmake`) instead of AIX `make`.
C++ Installation - Windows
--------------------------

src/google/protobuf/repeated_field.h
@@ -2075,11 +2075,11 @@ class RepeatedPtrIterator
it_ += d;
return *this;
}
friend iterator operator+(iterator it, difference_type d) {
friend iterator operator+(iterator it, const difference_type d) {
it += d;
return it;
}
friend iterator operator+(difference_type d, iterator it) {
friend iterator operator+(const difference_type d, iterator it) {
it += d;
return it;
}

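For context, a minimal usage sketch (not part of the patch) of what the symmetric operator+ overloads on RepeatedPtrIterator enable; the std::string element type is just an illustrative choice:

```cpp
#include <google/protobuf/repeated_field.h>
#include <string>

// Advance a RepeatedPtrField iterator from either side of the '+'.
void SkipFirstTwo(const google::protobuf::RepeatedPtrField<std::string>& names) {
  if (names.size() > 2) {
    google::protobuf::RepeatedPtrField<std::string>::const_iterator it =
        names.begin() + 2;   // iterator + difference_type
    it = 1 + names.begin();  // difference_type + iterator
    (void)it;                // element at index 1 would be used here
  }
}
```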
src/google/protobuf/stubs/atomicops.h
@@ -62,6 +62,15 @@ namespace google {
namespace protobuf {
namespace internal {
#if defined(GOOGLE_PROTOBUF_ARCH_POWER)
#if defined(_LP64) || defined(__LP64__)
typedef int32 Atomic32;
typedef intptr_t Atomic64;
#else
typedef intptr_t Atomic32;
typedef int64 Atomic64;
#endif
#else
typedef int32 Atomic32;
#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
@@ -75,6 +84,7 @@ typedef int64 Atomic64;
typedef intptr_t Atomic64;
#endif
#endif
#endif
// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
@@ -179,6 +189,10 @@ GOOGLE_PROTOBUF_ATOMICOPS_ERROR
#elif defined(GOOGLE_PROTOBUF_OS_SOLARIS)
#include <google/protobuf/stubs/atomicops_internals_solaris.h>
// AIX
#elif defined(GOOGLE_PROTOBUF_OS_AIX)
#include <google/protobuf/stubs/atomicops_internals_aix.h>
// Apple.
#elif defined(GOOGLE_PROTOBUF_OS_APPLE)
#include <google/protobuf/stubs/atomicops_internals_macosx.h>

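As a rough orientation (not part of the patch), this is how the internal atomic API selected above is typically consumed; whichever atomicops_internals_*.h gets included supplies these functions:

```cpp
#include <google/protobuf/stubs/atomicops.h>

namespace pbi = google::protobuf::internal;

// Sketch of a CAS retry loop built on the portable API; on AIX/POWER this
// compiles down to the lwarx/stwcx. sequences defined in the new header.
inline void IncrementCounter(volatile pbi::Atomic32* counter) {
  pbi::Atomic32 observed;
  do {
    observed = pbi::NoBarrier_Load(counter);
    // NoBarrier_CompareAndSwap returns the value it found at *counter;
    // the swap succeeded only if that value equals 'observed'.
  } while (pbi::NoBarrier_CompareAndSwap(counter, observed, observed + 1) !=
           observed);
}
```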
src/google/protobuf/stubs/atomicops_internals_aix.h (new file)
@@ -0,0 +1,440 @@
// Copyright 2014 Bloomberg Finance LP. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Bloomberg Finance LP. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_
namespace google {
namespace protobuf {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t"
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[val], %[res] \n\t" // add the operand
" stwcx. %[res], %[zero], %[obj] \n\t" // store the new value
// if still reserved
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void MemoryBarrier(void) {
asm volatile (
" lwsync \n\t"
" isync \n\t"
:
:
: "memory");
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[val], %[res] \n\t" // add the operand
" stwcx. %[res], %[zero], %[obj] \n\t" // store the new value
// if still reserved
" bne- 1b \n\t"
" isync \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
" isync \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpw %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stwcx. %[val], %[zero], %[obj] \n\t" // store new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
asm volatile (
" stw %[val], %[obj] \n\t"
" isync \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
asm volatile (
" lwsync \n\t"
" stw %[val], %[obj] \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 result;
asm volatile (
"1: lwz %[res], %[obj] \n\t"
" cmpw %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
" isync \n\t"
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
Atomic32 result;
asm volatile (
" lwsync \n\t"
"1: lwz %[res], %[obj] \n\t"
" cmpw %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
#ifdef GOOGLE_PROTOBUF_ARCH_64_BIT
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t"
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[res], %[val] \n\t" // add the operand
" stdcx. %[res], %[zero], %[obj] \n\t" // store the new value if
// still reserved
" bne- 1b \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" add %[res], %[res], %[val] \n\t" // add the operand
" stdcx. %[res], %[zero], %[obj] \n\t" // store the new value if
// still reserved
" bne- 1b \n\t"
" isync \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[val] "b" (increment),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
" isync \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ldarx %[res], %[zero], %[obj] \n\t" // load and reserve
" cmpd %[cmp], %[res] \n\t" // compare values
" bne- 2f \n\t"
" stdcx. %[val], %[zero], %[obj] \n\t" // store the new value
" bne- 1b \n\t"
"2: \n\t"
: [res] "=&b" (result)
: [obj] "b" (ptr),
[cmp] "b" (old_value),
[val] "b" (new_value),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
asm volatile (
" std %[val], %[obj] \n\t"
" isync \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
asm volatile (
" lwsync \n\t"
" std %[val], %[obj] \n\t"
: [obj] "=m" (*ptr)
: [val] "b" (value));
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 result;
asm volatile (
"1: ld %[res], %[obj] \n\t"
" cmpd %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
" isync \n\t"
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
Atomic64 result;
asm volatile (
" lwsync \n\t"
"1: ld %[res], %[obj] \n\t"
" cmpd %[res], %[res] \n\t" // create data
// dependency for
// load/load ordering
" bne- 1b \n\t" // never taken
: [res] "=b" (result)
: [obj] "m" (*ptr),
[zero] "i" (0)
: "cr0", "ctr");
return result;
}
#endif
} // namespace internal
} // namespace protobuf
} // namespace google
#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_AIX_H_

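For intuition only (this is not in the patch and not how the library builds on AIX), the load-reserve/store-conditional retry loops above play the same role as a C++11 compare-exchange; the acquire CAS corresponds roughly to:

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical C++11 analogue of Acquire_CompareAndSwap: return the value
// observed at *ptr, installing new_value only if old_value was observed.
inline int32_t AcquireCasEquivalent(std::atomic<int32_t>* ptr,
                                    int32_t old_value, int32_t new_value) {
  // On failure, compare_exchange_strong writes the observed value back into
  // old_value, so returning it matches the "return previous value" contract.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return old_value;
}
```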
src/google/protobuf/stubs/platform_macros.h
@@ -72,6 +72,9 @@
#else
#define GOOGLE_PROTOBUF_ARCH_32_BIT 1
#endif
#elif defined(_POWER)
#define GOOGLE_PROTOBUF_ARCH_POWER 1
#define GOOGLE_PROTOBUF_ARCH_64_BIT 1
#elif defined(__GNUC__)
# if (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || (__GNUC__ > 4))
// We fallback to the generic Clang/GCC >= 4.7 implementation in atomicops.h
@@ -96,6 +99,8 @@ GOOGLE_PROTOBUF_PLATFORM_ERROR
#define GOOGLE_PROTOBUF_OS_NACL
#elif defined(sun)
#define GOOGLE_PROTOBUF_OS_SOLARIS
#elif defined(_AIX)
#define GOOGLE_PROTOBUF_OS_AIX
#endif
#undef GOOGLE_PROTOBUF_PLATFORM_ERROR

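A small sanity-check sketch (hypothetical, not part of the patch) of what these macro additions mean when building with xlC on AIX, where the compiler predefines _POWER and _AIX:

```cpp
#include <google/protobuf/stubs/platform_macros.h>

// With xlC on AIX, _POWER and _AIX are predefined, so platform_macros.h is
// expected to take the POWER architecture and AIX OS branches added above,
// which in turn makes atomicops.h include atomicops_internals_aix.h.
#if !defined(GOOGLE_PROTOBUF_ARCH_POWER) || !defined(GOOGLE_PROTOBUF_OS_AIX)
#error "Expected GOOGLE_PROTOBUF_ARCH_POWER and GOOGLE_PROTOBUF_OS_AIX on AIX/xlC"
#endif
```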
src/google/protobuf/stubs/type_traits_unittest.cc
@@ -41,7 +41,10 @@
#include <gtest/gtest.h>
typedef int int32;
// IBM AIX typedefs `int64` in `sys/inttypes.h`, included transitively above.
#ifndef _AIX
typedef long int64;
#endif
using std::string;
using std::vector;

src/google/protobuf/wire_format_lite.cc
@@ -55,11 +55,12 @@ const int WireFormatLite::kMessageSetMessageTag;
#endif
// IBM xlC requires prefixing constants with WireFormatLite::
const int WireFormatLite::kMessageSetItemTagsSize =
io::CodedOutputStream::StaticVarintSize32<kMessageSetItemStartTag>::value +
io::CodedOutputStream::StaticVarintSize32<kMessageSetItemEndTag>::value +
io::CodedOutputStream::StaticVarintSize32<kMessageSetTypeIdTag>::value +
io::CodedOutputStream::StaticVarintSize32<kMessageSetMessageTag>::value;
io::CodedOutputStream::StaticVarintSize32<WireFormatLite::kMessageSetItemStartTag>::value +
io::CodedOutputStream::StaticVarintSize32<WireFormatLite::kMessageSetItemEndTag>::value +
io::CodedOutputStream::StaticVarintSize32<WireFormatLite::kMessageSetTypeIdTag>::value +
io::CodedOutputStream::StaticVarintSize32<WireFormatLite::kMessageSetMessageTag>::value;
const WireFormatLite::CppType
WireFormatLite::kFieldTypeToCppTypeMap[MAX_FIELD_TYPE + 1] = {

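A reduced illustration (hypothetical names, not from the patch) of the pattern behind that comment: a static const member used as a non-type template argument in an out-of-class initializer, where xlC reportedly needs the enclosing class named explicitly:

```cpp
template <int N>
struct StaticVarintSize {
  static const int value = (N < 128) ? 1 : 2;  // stand-in for StaticVarintSize32
};

struct Tags {
  static const int kStartTag = 11;
  static const int kTagsSize;
};

// Unqualified 'kStartTag' is accepted here by most compilers, but xlC is
// reported to need the explicit Tags:: qualification, as in the change above.
const int Tags::kTagsSize = StaticVarintSize<Tags::kStartTag>::value;
```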