Introduce `AllocateAtLeast()`

PiperOrigin-RevId: 504408238
pull/11654/head
Martijn Vels authored 2 years ago, committed by Copybara-Service
parent 8915a5ae42
commit 6d09e2c80a
Changed files:

  1. src/google/protobuf/arena.cc (70)
  2. src/google/protobuf/port.h (14)
  3. src/google/protobuf/repeated_field.h (10)
  4. src/google/protobuf/repeated_ptr_field.cc (4)
  5. src/google/protobuf/serial_arena.h (9)
  6. src/google/protobuf/thread_safe_arena.h (2)

src/google/protobuf/arena.cc

@@ -81,8 +81,8 @@ ArenaBlock* SentryArenaBlock() {
 }  // namespace
 
-static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
-                                          size_t last_size, size_t min_bytes) {
+static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
+                               size_t last_size, size_t min_bytes) {
   AllocationPolicy policy;  // default policy
   if (policy_ptr) policy = *policy_ptr;
   size_t size;
@@ -98,13 +98,10 @@ static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
                         SerialArena::kBlockHeaderSize);
   size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
 
-  void* mem;
   if (policy.block_alloc == nullptr) {
-    mem = ::operator new(size);
-  } else {
-    mem = policy.block_alloc(size);
+    return AllocateAtLeast(size);
   }
-  return {mem, size};
+  return {policy.block_alloc(size), size};
 }
 
 class GetDeallocator {
@@ -113,18 +110,18 @@ class GetDeallocator {
       : dealloc_(policy ? policy->block_dealloc : nullptr),
         space_allocated_(space_allocated) {}
 
-  void operator()(SerialArena::Memory mem) const {
+  void operator()(SizedPtr mem) const {
 #ifdef ADDRESS_SANITIZER
     // This memory was provided by the underlying allocator as unpoisoned,
     // so return it in an unpoisoned state.
-    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
+    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
 #endif  // ADDRESS_SANITIZER
     if (dealloc_) {
-      dealloc_(mem.ptr, mem.size);
+      dealloc_(mem.p, mem.n);
     } else {
-      internal::SizedDelete(mem.ptr, mem.size);
+      internal::SizedDelete(mem.p, mem.n);
     }
-    *space_allocated_ += mem.size;
+    *space_allocated_ += mem.n;
   }
 
  private:
@@ -168,20 +165,19 @@ void SerialArena::Init(ArenaBlock* b, size_t offset) {
   cached_blocks_ = nullptr;
 }
 
-SerialArena* SerialArena::New(Memory mem, ThreadSafeArena& parent) {
-  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize,
-                        mem.size);
+SerialArena* SerialArena::New(SizedPtr mem, ThreadSafeArena& parent) {
+  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.n);
   ThreadSafeArenaStats::RecordAllocateStats(parent.arena_stats_.MutableStats(),
-                                            /*used=*/0, /*allocated=*/mem.size,
+                                            /*used=*/0, /*allocated=*/mem.n,
                                             /*wasted=*/0);
-  auto b = new (mem.ptr) ArenaBlock{nullptr, mem.size};
+  auto b = new (mem.p) ArenaBlock{nullptr, mem.n};
   return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, parent);
 }
 
 template <typename Deallocator>
-SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
+SizedPtr SerialArena::Free(Deallocator deallocator) {
   ArenaBlock* b = head();
-  Memory mem = {b, b->size};
+  SizedPtr mem = {b, b->size};
   while (b->next) {
     b = b->next;  // We must first advance before deleting this block
     deallocator(mem);
@@ -236,12 +232,12 @@ void SerialArena::AllocateNewBlock(size_t n) {
   // exclusive access to a cacheline. Hence we write it in terms of a
   // regular add.
   space_allocated_.store(
-      space_allocated_.load(std::memory_order_relaxed) + mem.size,
+      space_allocated_.load(std::memory_order_relaxed) + mem.n,
       std::memory_order_relaxed);
   ThreadSafeArenaStats::RecordAllocateStats(parent_.arena_stats_.MutableStats(),
                                             /*used=*/used,
-                                            /*allocated=*/mem.size, wasted);
-  auto* new_head = new (mem.ptr) ArenaBlock{old_head, mem.size};
+                                            /*allocated=*/mem.n, wasted);
+  auto* new_head = new (mem.p) ArenaBlock{old_head, mem.n};
   set_ptr(new_head->Pointer(kBlockHeaderSize));
   limit_ = new_head->Limit();
   // Previous writes must take effect before writing new head.
@@ -490,7 +486,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
   GOOGLE_ABSL_DCHECK_EQ(reinterpret_cast<uintptr_t>(buf) & 7, 0u);
 
-  SerialArena::Memory mem;
+  SizedPtr mem;
   if (buf == nullptr || size < kBlockHeaderSize + kAllocPolicySize) {
     mem = AllocateMemory(&policy, 0, kAllocPolicySize);
   } else {
@@ -499,7 +495,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
     alloc_policy_.set_is_user_owned_initial_block(true);
   }
 
-  return new (mem.ptr) ArenaBlock{nullptr, mem.size};
+  return new (mem.p) ArenaBlock{nullptr, mem.n};
 }
 
 void ThreadSafeArena::InitializeWithPolicy(const AllocationPolicy& policy) {
@@ -566,10 +562,14 @@ ThreadSafeArena::SerialArenaChunk* ThreadSafeArena::NewSerialArenaChunk(
       static_cast<uint32_t>(next_bytes - kHeaderSize) / kEntrySize;
   // Growth based on bytes needs to be adjusted by AllocSize.
   next_bytes = SerialArenaChunk::AllocSize(next_capacity);
-  void* mem;
-  mem = ::operator new(next_bytes);
 
-  return new (mem) SerialArenaChunk{next_capacity, id, serial};
+  // If we allocate bigger memory than requested, we should expand
+  // size to use that extra space, and add extra entries permitted
+  // by the extra space.
+  SizedPtr mem = AllocateAtLeast(next_bytes);
+  next_capacity = static_cast<uint32_t>(mem.n - kHeaderSize) / kEntrySize;
+  GOOGLE_ABSL_DCHECK_LE(SerialArenaChunk::AllocSize(next_capacity), mem.n);
+  return new (mem.p) SerialArenaChunk{next_capacity, id, serial};
 }
 
 // Tries to reserve an entry by atomic fetch_add. If the head chunk is already
@@ -627,15 +627,15 @@ ThreadSafeArena::~ThreadSafeArena() {
   if (alloc_policy_.is_user_owned_initial_block()) {
 #ifdef ADDRESS_SANITIZER
     // Unpoison the initial block, now that it's going back to the user.
-    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
+    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
 #endif  // ADDRESS_SANITIZER
-    space_allocated += mem.size;
-  } else if (mem.size > 0) {
+    space_allocated += mem.n;
+  } else if (mem.n > 0) {
     GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
   }
 }
 
-SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
+SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
   auto deallocator = GetDeallocator(alloc_policy_.get(), space_allocated);
 
   WalkSerialArenaChunk([deallocator](SerialArenaChunk* chunk) {
@@ -647,8 +647,8 @@ SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
       SerialArena* serial = it->load(std::memory_order_relaxed);
       GOOGLE_ABSL_DCHECK_NE(serial, nullptr);
       // Always frees the first block of "serial" as it cannot be user-provided.
-      SerialArena::Memory mem = serial->Free(deallocator);
-      GOOGLE_ABSL_DCHECK_NE(mem.ptr, nullptr);
+      SizedPtr mem = serial->Free(deallocator);
+      GOOGLE_ABSL_DCHECK_NE(mem.p, nullptr);
       deallocator(mem);
     }
@@ -670,7 +670,7 @@ uint64_t ThreadSafeArena::Reset() {
   // allocated, always reuse the first block for the first arena.
   size_t space_allocated = 0;
   auto mem = Free(&space_allocated);
-  space_allocated += mem.size;
+  space_allocated += mem.n;
 
   // Reset the first arena with the first block. This avoids redundant
   // free / allocation and re-allocating for AllocationPolicy. Adjust offset if
@@ -680,7 +680,7 @@ uint64_t ThreadSafeArena::Reset() {
     size_t offset = alloc_policy_.get() == nullptr
                         ? kBlockHeaderSize
                         : kBlockHeaderSize + kAllocPolicySize;
-    first_arena_.Init(new (mem.ptr) ArenaBlock{nullptr, mem.size}, offset);
+    first_arena_.Init(new (mem.p) ArenaBlock{nullptr, mem.n}, offset);
   } else {
     first_arena_.Init(SentryArenaBlock(), 0);
   }

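The NewSerialArenaChunk() hunk above carries the core idea of the change: request at least AllocSize(next_capacity) bytes, then recompute the capacity from the number of bytes actually granted, so allocator slack becomes extra entries instead of wasted tail space. Below is a minimal standalone sketch of that recomputation; SizedPtr and AllocateAtLeast mirror the port.h definitions shown further down, while kHeaderSize, kEntrySize, AllocSize, and GrownCapacity are made-up placeholders rather than the real SerialArenaChunk layout.

#include <cstddef>
#include <cstdio>
#include <new>

namespace sketch {

struct SizedPtr {
  void* p;
  std::size_t n;
};

// Same shape as protobuf's AllocateAtLeast(): today it reports exactly the
// requested size, but callers are written as if n could come back larger.
inline SizedPtr AllocateAtLeast(std::size_t size) {
  return {::operator new(size), size};
}

constexpr std::size_t kHeaderSize = 16;  // placeholder chunk header size
constexpr std::size_t kEntrySize = 8;    // placeholder per-entry size
constexpr std::size_t AllocSize(std::size_t capacity) {
  return kHeaderSize + kEntrySize * capacity;
}

std::size_t GrownCapacity(std::size_t requested_capacity) {
  SizedPtr mem = AllocateAtLeast(AllocSize(requested_capacity));
  // Recompute capacity from the bytes actually granted; any extra bytes
  // become extra entries instead of unused tail space.
  std::size_t capacity = (mem.n - kHeaderSize) / kEntrySize;
  ::operator delete(mem.p);  // the real code constructs the chunk in place
  return capacity;
}

}  // namespace sketch

int main() {
  std::printf("capacity for 4 requested entries: %zu\n", sketch::GrownCapacity(4));
}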
src/google/protobuf/port.h

@@ -57,6 +57,20 @@ class MessageLite;
 
 namespace internal {
 
+// See comments on `AllocateAtLeast` for information on size returning new.
+struct SizedPtr {
+  void* p;
+  size_t n;
+};
+
+// Allocates at least `size` bytes. This function follows the c++ language
+// proposal from D0901R10 (http://wg21.link/D0901R10) and will be implemented
+// in terms of the new operator new semantics when available. The allocated
+// memory should be released by a call to `SizedDelete` or `::operator delete`.
+inline SizedPtr AllocateAtLeast(size_t size) {
+  return {::operator new(size), size};
+}
+
 inline void SizedDelete(void* p, size_t size) {
 #if defined(__cpp_sized_deallocation)
   ::operator delete(p, size);

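For reference, a hedged usage sketch of the new helpers: the caller records the granted size, treats it as the real capacity, and hands the same size back when deallocating. ScratchBuffer is an invented example type, not part of protobuf, and the SizedPtr / AllocateAtLeast / SizedDelete definitions are simplified copies of the declarations above so the snippet is self-contained.

#include <cstddef>
#include <new>

namespace example {

struct SizedPtr {
  void* p;
  std::size_t n;
};

inline SizedPtr AllocateAtLeast(std::size_t size) {
  return {::operator new(size), size};
}

inline void SizedDelete(void* p, std::size_t size) {
#if defined(__cpp_sized_deallocation)
  ::operator delete(p, size);
#else
  (void)size;
  ::operator delete(p);
#endif
}

// Invented example type: a byte buffer that keeps whatever capacity the
// allocator granted and releases it with the matching size.
class ScratchBuffer {
 public:
  explicit ScratchBuffer(std::size_t min_bytes) : mem_(AllocateAtLeast(min_bytes)) {}
  ~ScratchBuffer() { SizedDelete(mem_.p, mem_.n); }

  ScratchBuffer(const ScratchBuffer&) = delete;
  ScratchBuffer& operator=(const ScratchBuffer&) = delete;

  void* data() { return mem_.p; }
  std::size_t capacity() const { return mem_.n; }  // may exceed min_bytes

 private:
  SizedPtr mem_;
};

}  // namespace example

int main() {
  example::ScratchBuffer buf(256);
  return buf.capacity() >= 256 ? 0 : 1;
}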
src/google/protobuf/repeated_field.h

@@ -957,7 +957,15 @@ PROTOBUF_NOINLINE void RepeatedField<Element>::GrowNoAnnotate(int current_size,
   size_t bytes =
       kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
   if (arena == nullptr) {
-    new_rep = static_cast<Rep*>(::operator new(bytes));
+    GOOGLE_ABSL_DCHECK_LE((bytes - kRepHeaderSize) / sizeof(Element),
+                          static_cast<size_t>(std::numeric_limits<int>::max()))
+        << "Requested size is too large to fit element count into int.";
+    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
+    size_t num_available =
+        std::min((res.n - kRepHeaderSize) / sizeof(Element),
+                 static_cast<size_t>(std::numeric_limits<int>::max()));
+    new_size = static_cast<int>(num_available);
+    new_rep = static_cast<Rep*>(res.p);
   } else {
     new_rep = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }

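A small worked example of the GrowNoAnnotate() arithmetic above, with made-up numbers standing in for sizeof(Element) and kRepHeaderSize: growing to 100 four-byte elements with an 8-byte header requests 408 bytes; if the allocator granted 416, the field adopts 102 elements, and the std::min clamp keeps new_size from overflowing int for very large grants.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <limits>

int main() {
  // Made-up stand-ins for sizeof(Element) and kRepHeaderSize.
  constexpr std::size_t kElementSize = 4;
  constexpr std::size_t kRepHeaderSize = 8;

  const std::size_t requested_elements = 100;
  const std::size_t bytes = kRepHeaderSize + kElementSize * requested_elements;
  assert(bytes == 408);

  // Suppose the allocator rounded the request up to 416 bytes.
  const std::size_t granted = 416;
  const std::size_t num_available =
      std::min((granted - kRepHeaderSize) / kElementSize,
               static_cast<std::size_t>(std::numeric_limits<int>::max()));
  const int new_size = static_cast<int>(num_available);
  assert(new_size == 102);  // the two extra slots come from the 8 slack bytes
  return 0;
}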
src/google/protobuf/repeated_ptr_field.cc

@@ -65,7 +65,9 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
       << "Requested size is too large to fit into size_t.";
   size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
   if (arena == nullptr) {
-    rep_ = reinterpret_cast<Rep*>(::operator new(bytes));
+    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
+    new_size = (res.n - kRepHeaderSize) / sizeof(old_rep->elements[0]);
+    rep_ = reinterpret_cast<Rep*>(res.p);
   } else {
     rep_ = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }

src/google/protobuf/serial_arena.h

@@ -105,11 +105,6 @@ struct FirstSerialArena {
 // used.
 class PROTOBUF_EXPORT SerialArena {
  public:
-  struct Memory {
-    void* ptr;
-    size_t size;
-  };
-
   void CleanupList();
   uint64_t SpaceAllocated() const {
     return space_allocated_.load(std::memory_order_relaxed);
@@ -315,10 +310,10 @@ class PROTOBUF_EXPORT SerialArena {
   // future allocations.
   // The `parent` arena must outlive the serial arena, which is guaranteed
   // because the parent manages the lifetime of the serial arenas.
-  static SerialArena* New(SerialArena::Memory mem, ThreadSafeArena& parent);
+  static SerialArena* New(SizedPtr mem, ThreadSafeArena& parent);
   // Free SerialArena returning the memory passed in to New
   template <typename Deallocator>
-  Memory Free(Deallocator deallocator);
+  SizedPtr Free(Deallocator deallocator);
 
   // Members are declared here to track sizeof(SerialArena) and hotness
   // centrally. They are (roughly) laid out in descending order of hotness.

src/google/protobuf/thread_safe_arena.h

@@ -222,7 +222,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // Releases all memory except the first block which it returns. The first
   // block might be owned by the user and thus need some extra checks before
   // deleting.
-  SerialArena::Memory Free(size_t* space_allocated);
+  SizedPtr Free(size_t* space_allocated);
 
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
