Removed the cleanup list

It no longer has any users. If we need it later, we can add it back.

This shaves one pointer-sized field off `sizeof(upb_Arena)`.

Also, we now allow fuses even if the block allocators differ.  This was made possible by cl/520144430, but was not taken advantage of in that CL.
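
As a rough sketch of what this now permits (not code from this CL; `test_alloc` is modeled on the do-nothing allocator already used in `arena_test.cc`), two arenas whose roots use different block allocators can be fused, as long as neither was given an unowned initial block:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#include "upb/mem/arena.h"

// Forwards everything to the global allocator; it exists only so the two
// arenas below see different upb_alloc* values.
static void* TestAllocFunc(upb_alloc* alloc, void* ptr, size_t oldsize,
                           size_t size) {
  return upb_alloc_global.func(alloc, ptr, oldsize, size);
}
static upb_alloc test_alloc = {&TestAllocFunc};

int main(void) {
  // Neither arena has an initial block (mem == NULL), so the fuse is allowed
  // even though the block allocators differ; it used to return false.
  upb_Arena* a = upb_Arena_Init(NULL, 0, &upb_alloc_global);
  upb_Arena* b = upb_Arena_Init(NULL, 0, &test_alloc);
  bool fused = upb_Arena_Fuse(a, b);
  assert(fused);
  (void)fused;
  // Each handle is still freed exactly once; the fused memory is released
  // when the last reference goes away, each block via its own allocator.
  upb_Arena_Free(a);
  upb_Arena_Free(b);
  return 0;
}
```

Fusing an arena that was built on a caller-provided initial block still returns false, since that block's lifetime cannot be extended.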

PiperOrigin-RevId: 520174588
Branch: pull/13171/head
Author: Joshua Haberman (committed by Copybara-Service)
Parent: a73852630f
Commit: 73489a9339
Files changed (with change counts):
  upb/mem/arena.c                  77
  upb/mem/arena.h                   4
  upb/mem/arena_internal.h         44
  upb/mem/arena_test.cc            27
  upb/test/test_cpp.cc             61
  upb/test/test_generated_code.cc  84
  upb/upb.hpp                       8
  upb/wire/decode.c                 2

@@ -31,14 +31,6 @@
// Must be last.
#include "upb/port/def.inc"
static uint32_t* upb_cleanup_pointer(uintptr_t cleanup_metadata) {
return (uint32_t*)(cleanup_metadata & ~0x1);
}
static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
return cleanup_metadata & 0x1;
}
static uintptr_t upb_cleanup_metadata(uint32_t* cleanup,
bool has_initial_block) {
return (uintptr_t)cleanup | has_initial_block;
@@ -48,15 +40,9 @@ struct _upb_MemBlock {
// Atomic only for the benefit of SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) next;
uint32_t size;
uint32_t cleanups;
// Data follows.
};
typedef struct cleanup_ent {
upb_CleanupFunc* cleanup;
void* ud;
} cleanup_ent;
static const size_t memblock_reserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
@@ -128,13 +114,10 @@ static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
// Insert into linked list.
upb_Atomic_Init(&block->next, a->blocks);
block->size = (uint32_t)size;
block->cleanups = 0;
upb_Atomic_Store(&a->blocks, block, memory_order_relaxed);
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->cleanup_metadata = upb_cleanup_metadata(
&block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}
@@ -144,7 +127,7 @@ static bool upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_relaxed);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(a->block_alloc, block_size);
_upb_MemBlock* block = upb_malloc(upb_Arena_BlockAlloc(a), block_size);
if (!block) return false;
upb_Arena_AddBlock(a, block, block_size);
@@ -159,12 +142,13 @@ void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
/* Public Arena API ***********************************************************/
static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
upb_Arena* a;
/* We need to malloc the initial block. */
n = first_block_overhead + 256;
char* mem;
size_t n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
return NULL;
}
@@ -172,12 +156,11 @@ static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a->block_alloc = alloc;
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->cleanup_metadata = upb_cleanup_metadata(NULL, false);
upb_Arena_AddBlock(a, mem, n);
@@ -200,7 +183,7 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
return arena_initslow(mem, n, alloc);
return upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
@@ -209,10 +192,9 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = alloc;
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->cleanup_metadata = upb_cleanup_metadata(NULL, true);
return a;
}
@@ -224,22 +206,13 @@ static void arena_dofree(upb_Arena* a) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_relaxed);
while (block != NULL) {
/* Load first since we are deleting block. */
// Load first since we are deleting block.
_upb_MemBlock* next_block =
upb_Atomic_Load(&block->next, memory_order_relaxed);
if (block->cleanups > 0) {
cleanup_ent* end = UPB_PTR_AT(block, block->size, void);
cleanup_ent* ptr = end - block->cleanups;
for (; ptr < end; ptr++) {
ptr->cleanup(ptr->ud);
}
}
upb_free(a->block_alloc, block);
upb_free(block_alloc, block);
block = next_block;
}
a = next_arena;
@@ -275,27 +248,6 @@ retry:
goto retry;
}
bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func) {
cleanup_ent* ent;
uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);
if (!cleanups || _upb_ArenaHas(a) < sizeof(cleanup_ent)) {
if (!upb_Arena_AllocBlock(a, 128)) return false; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= sizeof(cleanup_ent));
cleanups = upb_cleanup_pointer(a->cleanup_metadata);
}
a->head.end -= sizeof(cleanup_ent);
ent = (cleanup_ent*)a->head.end;
(*cleanups)++;
UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));
ent->cleanup = func;
ent->ud = ud;
return true;
}
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// SAFE IN THE PRESENCE OF FUSE/FREE RACES BUT NOT IN THE
// PRESENCE OF FUSE/FUSE RACES!!!
@@ -318,17 +270,16 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// that only refcounts can change once we have found the root. Because the
// threads doing the fuse must hold references, we can guarantee that no
// refcounts will reach zero concurrently.
upb_Arena* r1 = _upb_Arena_FindRoot(a1);
upb_Arena* r2 = _upb_Arena_FindRoot(a2);
if (r1 == r2) return true; // Already fused.
// Do not fuse initial blocks since we cannot lifetime extend them.
if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;
// Only allow fuse with a common allocator
if (r1->block_alloc != r2->block_alloc) return false;
// Any other fuse scenario is allowed.
if (upb_Arena_HasInitialBlock(r1)) return false;
if (upb_Arena_HasInitialBlock(r2)) return false;
uintptr_t r1_poc =
upb_Atomic_Load(&r1->parent_or_count, memory_order_acquire);

@@ -49,8 +49,6 @@
typedef struct upb_Arena upb_Arena;
typedef void upb_CleanupFunc(void* context);
typedef struct {
char *ptr, *end;
} _upb_ArenaHead;
@@ -65,8 +63,6 @@ extern "C" {
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_AddCleanup(upb_Arena* a, void* ud,
upb_CleanupFunc* func);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);

@@ -37,23 +37,18 @@ typedef struct _upb_MemBlock _upb_MemBlock;
struct upb_Arena {
_upb_ArenaHead head;
/* Stores cleanup metadata for this arena.
* - a pointer to the current cleanup counter.
* - a boolean indicating if there is an unowned initial block. */
uintptr_t cleanup_metadata;
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */
upb_alloc* block_alloc;
/* When multiple arenas are fused together, each arena points to a parent
* arena (root points to itself). The root tracks how many live arenas
* reference it.
*
* The low bit is tagged:
* 0: pointer to parent
* 1: count, left shifted by one
*/
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
@@ -99,6 +94,21 @@ UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
return parent_or_count;
}
UPB_INLINE upb_alloc* upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
UPB_INLINE uintptr_t upb_Arena_MakeBlockAlloc(upb_alloc* alloc,
bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
UPB_INLINE bool upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
#include "upb/port/undef.inc"
#endif /* UPB_MEM_ARENA_INTERNAL_H_ */
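
To make the tagging concrete, here is a standalone sketch (my own illustration, mirroring `upb_Arena_MakeBlockAlloc()`, `upb_Arena_BlockAlloc()`, and `upb_Arena_HasInitialBlock()` above): because `upb_alloc*` is always at least 2-byte aligned, its low bit is free to carry the has-initial-block flag, and both values round-trip losslessly.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

// alloc_t stands in for upb_alloc; any naturally aligned struct works.
typedef struct { int dummy; } alloc_t;

static uintptr_t make_block_alloc(alloc_t* alloc, bool has_initial) {
  uintptr_t alloc_uint = (uintptr_t)alloc;
  assert((alloc_uint & 1) == 0);  // Low bit must be free for the tag.
  return alloc_uint | (has_initial ? 1 : 0);
}

static alloc_t* block_alloc(uintptr_t tagged) {
  return (alloc_t*)(tagged & ~(uintptr_t)1);
}

static bool has_initial_block(uintptr_t tagged) { return tagged & 1; }

int main(void) {
  alloc_t a = {0};
  uintptr_t tagged = make_block_alloc(&a, /*has_initial=*/true);
  assert(block_alloc(tagged) == &a);  // Pointer recovered intact.
  assert(has_initial_block(tagged));  // Flag recovered intact.
  return 0;
}
```

This is the same low-bit-tagging trick the arena already uses for `parent_or_count`.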

@@ -17,38 +17,14 @@
namespace {
static void decrement_int(void* ptr) {
int* iptr = static_cast<int*>(ptr);
(*iptr)--;
}
TEST(ArenaTest, ArenaFuse) {
int i1 = 5;
int i2 = 5;
int i3 = 5;
int i4 = 5;
upb_Arena* arena1 = upb_Arena_New();
upb_Arena* arena2 = upb_Arena_New();
upb_Arena_AddCleanup(arena1, &i1, decrement_int);
upb_Arena_AddCleanup(arena2, &i2, decrement_int);
EXPECT_TRUE(upb_Arena_Fuse(arena1, arena2));
upb_Arena_AddCleanup(arena1, &i3, decrement_int);
upb_Arena_AddCleanup(arena2, &i4, decrement_int);
upb_Arena_Free(arena1);
EXPECT_EQ(5, i1);
EXPECT_EQ(5, i2);
EXPECT_EQ(5, i3);
EXPECT_EQ(5, i4);
upb_Arena_Free(arena2);
EXPECT_EQ(4, i1);
EXPECT_EQ(4, i2);
EXPECT_EQ(4, i3);
EXPECT_EQ(4, i4);
}
/* Do nothing allocator for testing */
@@ -56,19 +32,18 @@ extern "C" void* TestAllocFunc(upb_alloc* alloc, void* ptr, size_t oldsize,
size_t size) {
return upb_alloc_global.func(alloc, ptr, oldsize, size);
}
ABSL_CONST_INIT upb_alloc test_alloc = {&TestAllocFunc};
TEST(ArenaTest, FuseWithInitialBlock) {
char buf1[1024];
char buf2[1024];
upb_Arena* arenas[] = {upb_Arena_Init(buf1, 1024, &upb_alloc_global),
upb_Arena_Init(buf2, 1024, &upb_alloc_global),
upb_Arena_Init(NULL, 0, &test_alloc),
upb_Arena_Init(NULL, 0, &upb_alloc_global)};
int size = sizeof(arenas) / sizeof(arenas[0]);
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
if (i == j) {
// Fuse to self is always allowed.
EXPECT_TRUE(upb_Arena_Fuse(arenas[i], arenas[j]));
} else {
EXPECT_FALSE(upb_Arena_Fuse(arenas[i], arenas[j]));

@@ -65,67 +65,6 @@ TEST(Cpp, Iteration) {
EXPECT_EQ(oneof_count, md.oneof_count());
}
TEST(Cpp, Arena) {
int n = 100000;
struct Decrementer {
Decrementer(int* _p) : p(_p) {}
~Decrementer() { (*p)--; }
int* p;
};
{
upb::Arena arena;
for (int i = 0; i < n; i++) {
arena.Own(new Decrementer(&n));
// Intersperse allocation and ensure we can write to it.
int* val = static_cast<int*>(upb_Arena_Malloc(arena.ptr(), sizeof(int)));
*val = i;
}
// Test a large allocation.
upb_Arena_Malloc(arena.ptr(), 1000000);
}
EXPECT_EQ(0, n);
{
// Test fuse.
upb::Arena arena1;
upb::Arena arena2;
arena1.Fuse(arena2);
upb_Arena_Malloc(arena1.ptr(), 10000);
upb_Arena_Malloc(arena2.ptr(), 10000);
}
}
TEST(Cpp, InlinedArena) {
int n = 100000;
struct Decrementer {
Decrementer(int* _p) : p(_p) {}
~Decrementer() { (*p)--; }
int* p;
};
{
upb::InlinedArena<1024> arena;
for (int i = 0; i < n; i++) {
arena.Own(new Decrementer(&n));
// Intersperse allocation and ensure we can write to it.
int* val = static_cast<int*>(upb_Arena_Malloc(arena.ptr(), sizeof(int)));
*val = i;
}
// Test a large allocation.
upb_Arena_Malloc(arena.ptr(), 1000000);
}
EXPECT_EQ(0, n);
}
TEST(Cpp, InlinedArena2) {
upb::InlinedArena<64> arena;
upb_Arena_Malloc(arena.ptr(), sizeof(int));

@@ -854,95 +854,11 @@ static void decrement_int(void* ptr) {
(*iptr)--;
}
TEST(GeneratedCode, ArenaFuse) {
int i1 = 5;
int i2 = 5;
int i3 = 5;
int i4 = 5;
upb_Arena* arena1 = upb_Arena_New();
upb_Arena* arena2 = upb_Arena_New();
upb_Arena_AddCleanup(arena1, &i1, decrement_int);
upb_Arena_AddCleanup(arena2, &i2, decrement_int);
EXPECT_TRUE(upb_Arena_Fuse(arena1, arena2));
upb_Arena_AddCleanup(arena1, &i3, decrement_int);
upb_Arena_AddCleanup(arena2, &i4, decrement_int);
upb_Arena_Free(arena1);
EXPECT_EQ(5, i1);
EXPECT_EQ(5, i2);
EXPECT_EQ(5, i3);
EXPECT_EQ(5, i4);
upb_Arena_Free(arena2);
EXPECT_EQ(4, i1);
EXPECT_EQ(4, i2);
EXPECT_EQ(4, i3);
EXPECT_EQ(4, i4);
}
/* Do nothing allocator for testing */
static void* test_allocfunc(upb_alloc* alloc, void* ptr, size_t oldsize,
size_t size) {
return upb_alloc_global.func(alloc, ptr, oldsize, size);
}
upb_alloc test_alloc = {&test_allocfunc};
TEST(GeneratedCode, FuseWithInitialBlock) {
char buf1[1024];
char buf2[1024];
upb_Arena* arenas[] = {upb_Arena_Init(buf1, 1024, &upb_alloc_global),
upb_Arena_Init(buf2, 1024, &upb_alloc_global),
upb_Arena_Init(NULL, 0, &test_alloc),
upb_Arena_Init(NULL, 0, &upb_alloc_global)};
int size = sizeof(arenas) / sizeof(arenas[0]);
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
if (i == j) {
EXPECT_TRUE(upb_Arena_Fuse(arenas[i], arenas[j]));
} else {
EXPECT_FALSE(upb_Arena_Fuse(arenas[i], arenas[j]));
}
}
}
for (int i = 0; i < size; ++i) upb_Arena_Free(arenas[i]);
}
TEST(GeneratedCode, ArenaDecode) {
// Tests against a bug that previously existed when passing an arena to
// upb_decode().
char large_string[1024] = {0};
upb_StringView large_string_view = {large_string, sizeof(large_string)};
upb_Arena* tmp = upb_Arena_New();
protobuf_test_messages_proto3_TestAllTypesProto3* msg =
protobuf_test_messages_proto3_TestAllTypesProto3_new(tmp);
protobuf_test_messages_proto3_TestAllTypesProto3_set_optional_bytes(
msg, large_string_view);
upb_StringView serialized;
serialized.data = protobuf_test_messages_proto3_TestAllTypesProto3_serialize(
msg, tmp, &serialized.size);
upb_Arena* arena = upb_Arena_New();
// Parse the large payload, forcing an arena block to be allocated. This used
// to corrupt the cleanup list, preventing subsequent upb_Arena_AddCleanup()
// calls from working properly.
protobuf_test_messages_proto3_TestAllTypesProto3_parse(
serialized.data, serialized.size, arena);
int i1 = 5;
upb_Arena_AddCleanup(arena, &i1, decrement_int);
EXPECT_EQ(5, i1);
upb_Arena_Free(arena);
EXPECT_EQ(4, i1);
upb_Arena_Free(tmp);
}
TEST(GeneratedCode, ArenaUnaligned) {
char buf1[1024];

@@ -77,14 +77,6 @@ class Arena {
upb_Arena* ptr() const { return ptr_.get(); }
// Add a cleanup function to run when the arena is destroyed.
// Returns false on out-of-memory.
template <class T>
bool Own(T* obj) {
return upb_Arena_AddCleanup(ptr_.get(), obj,
[](void* obj) { delete static_cast<T*>(obj); });
}
void Fuse(Arena& other) { upb_Arena_Fuse(ptr(), other.ptr()); }
protected:

@@ -1285,7 +1285,6 @@ static upb_DecodeStatus upb_Decoder_Decode(upb_Decoder* const decoder,
_upb_MemBlock* blocks =
upb_Atomic_Load(&decoder->arena.blocks, memory_order_relaxed);
arena->head = decoder->arena.head;
arena->cleanup_metadata = decoder->arena.cleanup_metadata;
upb_Atomic_Store(&arena->blocks, blocks, memory_order_relaxed);
return decoder->status;
}
@@ -1316,7 +1315,6 @@ upb_DecodeStatus upb_Decode(const char* buf, size_t size, void* msg,
_upb_MemBlock* blocks = upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
decoder.arena.head = arena->head;
decoder.arena.block_alloc = arena->block_alloc;
decoder.arena.cleanup_metadata = arena->cleanup_metadata;
upb_Atomic_Init(&decoder.arena.blocks, blocks);
return upb_Decoder_Decode(&decoder, buf, msg, l, arena);
