Introduce `AllocateAtLeast()`

PiperOrigin-RevId: 504408238
pull/11654/head
Author: Martijn Vels, committed by Copybara-Service
parent 8915a5ae42
commit 6d09e2c80a
  1. src/google/protobuf/arena.cc (68 changed lines)
  2. src/google/protobuf/port.h (14 changed lines)
  3. src/google/protobuf/repeated_field.h (10 changed lines)
  4. src/google/protobuf/repeated_ptr_field.cc (4 changed lines)
  5. src/google/protobuf/serial_arena.h (9 changed lines)
  6. src/google/protobuf/thread_safe_arena.h (2 changed lines)

src/google/protobuf/arena.cc

@@ -81,7 +81,7 @@ ArenaBlock* SentryArenaBlock() {
 }  // namespace
 
-static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
+static SizedPtr AllocateMemory(const AllocationPolicy* policy_ptr,
                                size_t last_size, size_t min_bytes) {
   AllocationPolicy policy;  // default policy
   if (policy_ptr) policy = *policy_ptr;
@@ -98,13 +98,10 @@ static SerialArena::Memory AllocateMemory(const AllocationPolicy* policy_ptr,
                     SerialArena::kBlockHeaderSize);
   size = std::max(size, SerialArena::kBlockHeaderSize + min_bytes);
 
-  void* mem;
   if (policy.block_alloc == nullptr) {
-    mem = ::operator new(size);
-  } else {
-    mem = policy.block_alloc(size);
+    return AllocateAtLeast(size);
   }
-  return {mem, size};
+  return {policy.block_alloc(size), size};
 }
 
 class GetDeallocator {
@@ -113,18 +110,18 @@ class GetDeallocator {
       : dealloc_(policy ? policy->block_dealloc : nullptr),
         space_allocated_(space_allocated) {}
 
-  void operator()(SerialArena::Memory mem) const {
+  void operator()(SizedPtr mem) const {
 #ifdef ADDRESS_SANITIZER
     // This memory was provided by the underlying allocator as unpoisoned,
     // so return it in an unpoisoned state.
-    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
+    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
 #endif  // ADDRESS_SANITIZER
     if (dealloc_) {
-      dealloc_(mem.ptr, mem.size);
+      dealloc_(mem.p, mem.n);
     } else {
-      internal::SizedDelete(mem.ptr, mem.size);
+      internal::SizedDelete(mem.p, mem.n);
     }
-    *space_allocated_ += mem.size;
+    *space_allocated_ += mem.n;
   }
 
  private:
@@ -168,20 +165,19 @@ void SerialArena::Init(ArenaBlock* b, size_t offset) {
   cached_blocks_ = nullptr;
 }
 
-SerialArena* SerialArena::New(Memory mem, ThreadSafeArena& parent) {
-  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize,
-                        mem.size);
+SerialArena* SerialArena::New(SizedPtr mem, ThreadSafeArena& parent) {
+  GOOGLE_ABSL_DCHECK_LE(kBlockHeaderSize + ThreadSafeArena::kSerialArenaSize, mem.n);
   ThreadSafeArenaStats::RecordAllocateStats(parent.arena_stats_.MutableStats(),
-                                            /*used=*/0, /*allocated=*/mem.size,
+                                            /*used=*/0, /*allocated=*/mem.n,
                                             /*wasted=*/0);
-  auto b = new (mem.ptr) ArenaBlock{nullptr, mem.size};
+  auto b = new (mem.p) ArenaBlock{nullptr, mem.n};
   return new (b->Pointer(kBlockHeaderSize)) SerialArena(b, parent);
 }
 
 template <typename Deallocator>
-SerialArena::Memory SerialArena::Free(Deallocator deallocator) {
+SizedPtr SerialArena::Free(Deallocator deallocator) {
   ArenaBlock* b = head();
-  Memory mem = {b, b->size};
+  SizedPtr mem = {b, b->size};
   while (b->next) {
     b = b->next;  // We must first advance before deleting this block
     deallocator(mem);
@@ -236,12 +232,12 @@ void SerialArena::AllocateNewBlock(size_t n) {
   // exclusive access to a cacheline. Hence we write it in terms of a
   // regular add.
   space_allocated_.store(
-      space_allocated_.load(std::memory_order_relaxed) + mem.size,
+      space_allocated_.load(std::memory_order_relaxed) + mem.n,
       std::memory_order_relaxed);
   ThreadSafeArenaStats::RecordAllocateStats(parent_.arena_stats_.MutableStats(),
                                             /*used=*/used,
-                                            /*allocated=*/mem.size, wasted);
-  auto* new_head = new (mem.ptr) ArenaBlock{old_head, mem.size};
+                                            /*allocated=*/mem.n, wasted);
+  auto* new_head = new (mem.p) ArenaBlock{old_head, mem.n};
   set_ptr(new_head->Pointer(kBlockHeaderSize));
   limit_ = new_head->Limit();
   // Previous writes must take effect before writing new head.
@@ -490,7 +486,7 @@ ArenaBlock* ThreadSafeArena::FirstBlock(void* buf, size_t size,
   GOOGLE_ABSL_DCHECK_EQ(reinterpret_cast<uintptr_t>(buf) & 7, 0u);
 
-  SerialArena::Memory mem;
+  SizedPtr mem;
   if (buf == nullptr || size < kBlockHeaderSize + kAllocPolicySize) {
     mem = AllocateMemory(&policy, 0, kAllocPolicySize);
   } else {
@@ -499,7 +495,7 @@
     alloc_policy_.set_is_user_owned_initial_block(true);
   }
 
-  return new (mem.ptr) ArenaBlock{nullptr, mem.size};
+  return new (mem.p) ArenaBlock{nullptr, mem.n};
 }
 
 void ThreadSafeArena::InitializeWithPolicy(const AllocationPolicy& policy) {
@@ -566,10 +562,14 @@ ThreadSafeArena::SerialArenaChunk* ThreadSafeArena::NewSerialArenaChunk(
       static_cast<uint32_t>(next_bytes - kHeaderSize) / kEntrySize;
   // Growth based on bytes needs to be adjusted by AllocSize.
   next_bytes = SerialArenaChunk::AllocSize(next_capacity);
-  void* mem;
-  mem = ::operator new(next_bytes);
 
-  return new (mem) SerialArenaChunk{next_capacity, id, serial};
+  // If we allocate bigger memory than requested, we should expand
+  // size to use that extra space, and add extra entries permitted
+  // by the extra space.
+  SizedPtr mem = AllocateAtLeast(next_bytes);
+  next_capacity = static_cast<uint32_t>(mem.n - kHeaderSize) / kEntrySize;
+  GOOGLE_ABSL_DCHECK_LE(SerialArenaChunk::AllocSize(next_capacity), mem.n);
+  return new (mem.p) SerialArenaChunk{next_capacity, id, serial};
 }
 
 // Tries to reserve an entry by atomic fetch_add. If the head chunk is already
@@ -627,15 +627,15 @@ ThreadSafeArena::~ThreadSafeArena() {
   if (alloc_policy_.is_user_owned_initial_block()) {
 #ifdef ADDRESS_SANITIZER
     // Unpoison the initial block, now that it's going back to the user.
-    ASAN_UNPOISON_MEMORY_REGION(mem.ptr, mem.size);
+    ASAN_UNPOISON_MEMORY_REGION(mem.p, mem.n);
 #endif  // ADDRESS_SANITIZER
-    space_allocated += mem.size;
-  } else if (mem.size > 0) {
+    space_allocated += mem.n;
+  } else if (mem.n > 0) {
     GetDeallocator(alloc_policy_.get(), &space_allocated)(mem);
   }
 }
 
-SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
+SizedPtr ThreadSafeArena::Free(size_t* space_allocated) {
   auto deallocator = GetDeallocator(alloc_policy_.get(), space_allocated);
 
   WalkSerialArenaChunk([deallocator](SerialArenaChunk* chunk) {
@@ -647,8 +647,8 @@ SerialArena::Memory ThreadSafeArena::Free(size_t* space_allocated) {
       SerialArena* serial = it->load(std::memory_order_relaxed);
       GOOGLE_ABSL_DCHECK_NE(serial, nullptr);
       // Always frees the first block of "serial" as it cannot be user-provided.
-      SerialArena::Memory mem = serial->Free(deallocator);
-      GOOGLE_ABSL_DCHECK_NE(mem.ptr, nullptr);
+      SizedPtr mem = serial->Free(deallocator);
+      GOOGLE_ABSL_DCHECK_NE(mem.p, nullptr);
       deallocator(mem);
     }
@@ -670,7 +670,7 @@ uint64_t ThreadSafeArena::Reset() {
   // allocated, always reuse the first block for the first arena.
   size_t space_allocated = 0;
   auto mem = Free(&space_allocated);
-  space_allocated += mem.size;
+  space_allocated += mem.n;
 
   // Reset the first arena with the first block. This avoids redundant
   // free / allocation and re-allocating for AllocationPolicy. Adjust offset if
@@ -680,7 +680,7 @@
     size_t offset = alloc_policy_.get() == nullptr
                         ? kBlockHeaderSize
                         : kBlockHeaderSize + kAllocPolicySize;
-    first_arena_.Init(new (mem.ptr) ArenaBlock{nullptr, mem.size}, offset);
+    first_arena_.Init(new (mem.p) ArenaBlock{nullptr, mem.n}, offset);
   } else {
     first_arena_.Init(SentryArenaBlock(), 0);
   }

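The NewSerialArenaChunk() hunk above is where size-returning allocation pays off most directly: any bytes the allocator rounds up become extra chunk entries. Below is a minimal standalone sketch of that arithmetic; kHeaderSize, kEntrySize, and the 160-byte size class are invented for illustration and are not the real values from arena.cc.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Illustrative values only; the real kHeaderSize / kEntrySize come from
    // SerialArenaChunk's layout in arena.cc.
    constexpr size_t kHeaderSize = 16;
    constexpr size_t kEntrySize = 16;

    constexpr size_t AllocSize(uint32_t capacity) {
      return kHeaderSize + capacity * kEntrySize;
    }

    int main() {
      uint32_t next_capacity = 8;
      size_t next_bytes = AllocSize(next_capacity);  // 144 bytes requested

      // Pretend the allocator's size class rounds the request up to 160
      // bytes; a real AllocateAtLeast() would report this via SizedPtr::n.
      size_t actual_bytes = 160;

      // Fold the slack back into capacity, as the new code does.
      next_capacity =
          static_cast<uint32_t>((actual_bytes - kHeaderSize) / kEntrySize);

      printf("requested %zu, got %zu -> capacity %u (uses %zu bytes)\n",
             next_bytes, actual_bytes, next_capacity, AllocSize(next_capacity));
      // Prints: requested 144, got 160 -> capacity 9 (uses 160 bytes)
      return 0;
    }

The GOOGLE_ABSL_DCHECK_LE in the real hunk asserts exactly the property shown in the final line: the recomputed capacity never claims more bytes than the allocator actually returned.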
src/google/protobuf/port.h

@@ -57,6 +57,20 @@ class MessageLite;
 
 namespace internal {
 
+// See comments on `AllocateAtLeast` for information on size returning new.
+struct SizedPtr {
+  void* p;
+  size_t n;
+};
+
+// Allocates at least `size` bytes. This function follows the c++ language
+// proposal from D0901R10 (http://wg21.link/D0901R10) and will be implemented
+// in terms of the new operator new semantics when available. The allocated
+// memory should be released by a call to `SizedDelete` or `::operator delete`.
+inline SizedPtr AllocateAtLeast(size_t size) {
+  return {::operator new(size), size};
+}
+
 inline void SizedDelete(void* p, size_t size) {
 #if defined(__cpp_sized_deallocation)
   ::operator delete(p, size);

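Taken together, the port.h additions define a small contract: AllocateAtLeast() may hand back more usable memory than requested (today it never does, since it forwards to plain ::operator new), and callers release the block with SizedDelete() using the size they were given. Here is a self-contained sketch of that contract; the #else branch of SizedDelete and the main() driver are assumptions for this example, not part of the diff.

    #include <cstddef>
    #include <new>

    struct SizedPtr {
      void* p;
      size_t n;
    };

    // Mirrors the port.h addition: n is currently just the requested size,
    // but under P0901-style size-returning new it could be larger.
    inline SizedPtr AllocateAtLeast(size_t size) {
      return {::operator new(size), size};
    }

    inline void SizedDelete(void* p, size_t size) {
    #if defined(__cpp_sized_deallocation)
      ::operator delete(p, size);
    #else
      (void)size;  // assumed fallback when sized deallocation is unavailable
      ::operator delete(p);
    #endif
    }

    int main() {
      SizedPtr mem = AllocateAtLeast(100);
      // The caller may treat all mem.n bytes as usable, not just the 100 it
      // asked for, and must free with the size it was told.
      SizedDelete(mem.p, mem.n);
      return 0;
    }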
src/google/protobuf/repeated_field.h

@@ -957,7 +957,15 @@ PROTOBUF_NOINLINE void RepeatedField<Element>::GrowNoAnnotate(int current_size,
   size_t bytes =
       kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);
   if (arena == nullptr) {
-    new_rep = static_cast<Rep*>(::operator new(bytes));
+    GOOGLE_ABSL_DCHECK_LE((bytes - kRepHeaderSize) / sizeof(Element),
+                          static_cast<size_t>(std::numeric_limits<int>::max()))
+        << "Requested size is too large to fit element count into int.";
+    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
+    size_t num_available =
+        std::min((res.n - kRepHeaderSize) / sizeof(Element),
+                 static_cast<size_t>(std::numeric_limits<int>::max()));
+    new_size = static_cast<int>(num_available);
+    new_rep = static_cast<Rep*>(res.p);
   } else {
     new_rep = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }

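The RepeatedField hunk combines two steps that are easy to conflate: converting slack bytes into whole elements, and clamping so the element count still fits the int-typed capacity field. Below is a hedged sketch with invented numbers (an 8-byte header, 8-byte elements, and an allocator that returns 8200 bytes for an 8008-byte request):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <limits>

    // Stand-ins for this sketch; the real kRepHeaderSize and element type
    // come from RepeatedField's Rep layout.
    constexpr size_t kRepHeaderSize = 8;
    using Element = long long;  // 8-byte elements

    int main() {
      int new_size = 1000;
      size_t bytes =
          kRepHeaderSize + sizeof(Element) * static_cast<size_t>(new_size);

      // Suppose the allocator returns 8200 bytes for the 8008-byte request.
      size_t res_n = 8200;

      // Turn the slack into whole elements, clamped so the count still fits
      // in an int, matching the GrowNoAnnotate() change.
      size_t num_available =
          std::min((res_n - kRepHeaderSize) / sizeof(Element),
                   static_cast<size_t>(std::numeric_limits<int>::max()));
      new_size = static_cast<int>(num_available);

      printf("requested %zu bytes; capacity grew from 1000 to %d elements\n",
             bytes, new_size);  // 8008 bytes -> 1024 elements
      return 0;
    }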
src/google/protobuf/repeated_ptr_field.cc

@@ -65,7 +65,9 @@ void** RepeatedPtrFieldBase::InternalExtend(int extend_amount) {
       << "Requested size is too large to fit into size_t.";
   size_t bytes = kRepHeaderSize + sizeof(old_rep->elements[0]) * new_size;
   if (arena == nullptr) {
-    rep_ = reinterpret_cast<Rep*>(::operator new(bytes));
+    internal::SizedPtr res = internal::AllocateAtLeast(bytes);
+    new_size = (res.n - kRepHeaderSize) / sizeof(old_rep->elements[0]);
+    rep_ = reinterpret_cast<Rep*>(res.p);
   } else {
     rep_ = reinterpret_cast<Rep*>(Arena::CreateArray<char>(arena, bytes));
   }

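The RepeatedPtrFieldBase change is the same pattern minus the clamp: res.n is folded straight back into new_size, so any rounding by the underlying allocator becomes whole extra pointer slots rather than wasted tail bytes.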
src/google/protobuf/serial_arena.h

@@ -105,11 +105,6 @@ struct FirstSerialArena {
 // used.
 class PROTOBUF_EXPORT SerialArena {
  public:
-  struct Memory {
-    void* ptr;
-    size_t size;
-  };
-
   void CleanupList();
   uint64_t SpaceAllocated() const {
     return space_allocated_.load(std::memory_order_relaxed);

@@ -315,10 +310,10 @@
   // future allocations.
   // The `parent` arena must outlive the serial arena, which is guaranteed
   // because the parent manages the lifetime of the serial arenas.
-  static SerialArena* New(SerialArena::Memory mem, ThreadSafeArena& parent);
+  static SerialArena* New(SizedPtr mem, ThreadSafeArena& parent);
   // Free SerialArena returning the memory passed in to New
   template <typename Deallocator>
-  Memory Free(Deallocator deallocator);
+  SizedPtr Free(Deallocator deallocator);
 
   // Members are declared here to track sizeof(SerialArena) and hotness
   // centrally. They are (roughly) laid out in descending order of hotness.

src/google/protobuf/thread_safe_arena.h

@@ -222,7 +222,7 @@ class PROTOBUF_EXPORT ThreadSafeArena {
   // Releases all memory except the first block which it returns. The first
   // block might be owned by the user and thus need some extra checks before
   // deleting.
-  SerialArena::Memory Free(size_t* space_allocated);
+  SizedPtr Free(size_t* space_allocated);
 
 #ifdef _MSC_VER
 #pragma warning(disable : 4324)
