@@ -55,6 +55,13 @@ namespace google {
 namespace protobuf {
 namespace internal {
 
+// To prevent sharing cache lines between threads
+#ifdef __cpp_aligned_new
+enum { kCacheAlignment = 64 };
+#else
+enum { kCacheAlignment = alignof(max_align_t) };  // do the best we can
+#endif
+
 inline constexpr size_t AlignUpTo8(size_t n) {
   // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
   return (n + 7) & static_cast<size_t>(-8);
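The hunk above introduces kCacheAlignment next to the existing AlignUpTo8 helper. As a quick sanity check of the arithmetic involved, the following standalone sketch (not part of the patch; plain C++11, names chosen here) verifies the Hacker's Delight rounding identity and the power-of-two property that alignas requires of the chosen alignment:

    // Standalone sketch, not patch content: checks the rounding identity used
    // by AlignUpTo8 and the power-of-two requirement on kCacheAlignment.
    #include <cstddef>

    constexpr size_t AlignUpTo8(size_t n) {
      // (n + 7) & ~7: adding 7 carries into the next multiple of 8 unless n is
      // already a multiple, and masking the low three bits rounds back down.
      return (n + 7) & static_cast<size_t>(-8);
    }

    static_assert(AlignUpTo8(0) == 0, "already aligned");
    static_assert(AlignUpTo8(1) == 8, "rounds up");
    static_assert(AlignUpTo8(8) == 8, "exact multiple unchanged");
    static_assert(AlignUpTo8(13) == 16, "13 -> 16");

    enum { kCacheAlignment = 64 };  // value the patch uses under __cpp_aligned_new
    static_assert((kCacheAlignment & (kCacheAlignment - 1)) == 0,
                  "valid alignments must be powers of two");

    int main() { return 0; }

The #else branch falls back to alignof(max_align_t) because, without __cpp_aligned_new, dynamic allocation is only guaranteed to honor fundamental alignment, hence the "do the best we can" comment.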
@@ -497,10 +504,10 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // have fallback function calls in tail position. This substantially improves
   // code for the happy path.
   PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
-    SerialArena* a;
+    SerialArena* arena;
     if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() &&
-                              GetSerialArenaFromThreadCache(&a))) {
-      return a->MaybeAllocateAligned(n, out);
+                              GetSerialArenaFromThreadCache(&arena))) {
+      return arena->MaybeAllocateAligned(n, out);
     }
     return false;
   }
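The context comments in this hunk are about code generation: MaybeAllocateAligned returns a bool instead of calling the slow path itself, so callers can keep the fallback call in tail position. A minimal standalone sketch of that shape (not protobuf code; FastPool, AllocSlow, and Alloc are made-up names) looks like:

    // Standalone sketch, not patch content: a bool-returning fast path lets
    // the caller leave the slow path as the final, tail-position call.
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    struct FastPool {
      char buffer[256];
      size_t used = 0;

      // Returns true and writes *out only when the request fits in the buffer;
      // never calls anything else, so callers keep their slow path separate.
      bool MaybeAllocate(size_t n, void** out) {
        if (used + n <= sizeof(buffer)) {
          *out = buffer + used;
          used += n;
          return true;
        }
        return false;
      }
    };

    void* AllocSlow(size_t n) { return std::malloc(n); }  // stand-in fallback

    void* Alloc(FastPool& pool, size_t n) {
      void* mem;
      if (pool.MaybeAllocate(n, &mem)) return mem;  // happy path: straight-line code
      return AllocSlow(n);                          // fallback stays in tail position
    }

    int main() {
      FastPool pool;
      void* p = Alloc(pool, 32);    // served from the pool
      void* q = Alloc(pool, 1024);  // falls back to malloc
      std::printf("%p %p\n", p, q);
      std::free(q);
    }

Keeping the miss handling as the last call means the inlined happy path stays a compare-and-return, and the compiler can emit the fallback as a tail call rather than a larger frame.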
@@ -564,7 +571,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
     // fast path optimizes the case where a single thread uses multiple arenas.
     ThreadCache* tc = &thread_cache();
     SerialArena* serial = hint_.load(std::memory_order_acquire);
-    if (PROTOBUF_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) {
+    if (PROTOBUF_PREDICT_TRUE(serial != nullptr && serial->owner() == tc)) {
       *arena = serial;
       return true;
     }
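The hint_ load in this hunk is a cached pointer to the most recently used SerialArena, accepted only if it is owned by the calling thread. A standalone sketch of that owner-checked hint pattern (not protobuf code; Registry, Cache, and tls_tag are illustrative names):

    // Standalone sketch, not patch content: an atomic hint is reused only when
    // it is owned by the current thread; otherwise the caller falls back.
    #include <atomic>
    #include <cassert>
    #include <thread>

    struct ThreadTag {};              // stand-in for per-thread identity
    thread_local ThreadTag tls_tag;   // a distinct address per thread

    struct Cache {
      ThreadTag* owner;  // which thread's cache this belongs to
      int value;
    };

    struct Registry {
      std::atomic<Cache*> hint{nullptr};

      // Fast path: reuse the hinted cache only when this thread owns it.
      bool GetFromHint(Cache** out) {
        Cache* c = hint.load(std::memory_order_acquire);
        if (c != nullptr && c->owner == &tls_tag) {
          *out = c;
          return true;
        }
        return false;  // caller falls back to a slower lookup / creation path
      }
    };

    int main() {
      Registry reg;
      Cache mine{&tls_tag, 42};
      reg.hint.store(&mine, std::memory_order_release);

      Cache* found = nullptr;
      assert(reg.GetFromHint(&found) && found->value == 42);  // same thread: hit

      std::thread([&] {
        Cache* other = nullptr;
        assert(!reg.GetFromHint(&other));  // different thread: hint rejected
      }).join();
    }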
@@ -602,7 +609,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
 #endif
-  struct alignas(64) ThreadCache {
+  struct alignas(kCacheAlignment) ThreadCache {
 #if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
     // If we are using the ThreadLocalStorage class to store the ThreadCache,
     // then the ThreadCache's default constructor has to be responsible for
@@ -610,7 +617,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
     ThreadCache()
         : next_lifecycle_id(0),
           last_lifecycle_id_seen(-1),
-          last_serial_arena(NULL) {}
+          last_serial_arena(nullptr) {}
 #endif
 
     // Number of per-thread lifecycle IDs to reserve. Must be power of two.
@@ -633,7 +640,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
 #endif
-  struct alignas(64) CacheAlignedLifecycleIdGenerator {
+  struct alignas(kCacheAlignment) CacheAlignedLifecycleIdGenerator {
     std::atomic<LifecycleIdAtomic> id;
   };
   static CacheAlignedLifecycleIdGenerator lifecycle_id_generator_;
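Both alignas(64) sites now use alignas(kCacheAlignment), and the surrounding #pragma warning(disable : 4324) silences MSVC's "structure was padded due to alignment specifier" warning that this padding triggers. A standalone sketch (not part of the patch; assumes a 64-byte cache line, the value the patch uses under __cpp_aligned_new) of why per-thread data is padded to a cache line:

    // Standalone sketch, not patch content: each counter occupies its own
    // cache line, so threads updating different counters do not false-share.
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>

    constexpr size_t kCacheAlignment = 64;  // assumed cache line size

    struct alignas(kCacheAlignment) PaddedCounter {
      std::atomic<long> value{0};
    };
    static_assert(alignof(PaddedCounter) == kCacheAlignment,
                  "one counter per cache line");

    PaddedCounter counters[2];  // each element starts on its own cache line

    int main() {
      std::thread a([] {
        for (int i = 0; i < 1000000; ++i)
          counters[0].value.fetch_add(1, std::memory_order_relaxed);
      });
      std::thread b([] {
        for (int i = 0; i < 1000000; ++i)
          counters[1].value.fetch_add(1, std::memory_order_relaxed);
      });
      a.join();
      b.join();
      std::printf("%ld %ld\n", counters[0].value.load(), counters[1].value.load());
    }

Without the alignment, the two atomics could land on the same cache line and every update from one thread would invalidate the other's line; the padding trades a little memory for independent lines, which is the same trade the ThreadCache and CacheAlignedLifecycleIdGenerator structs make.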