Arena refactor: moves cleanup list into regular blocks (#277)

* WIP.

* WIP.

* Tests are passing.

* Recover some perf: LIKELY doesn't propagate through functions. :(

* Added some more benchmarks.

* Simplify & optimize upb_arena_realloc().

* Only add owned blocks to the freelist.

* More optimization/simplification.

* Re-fixed the bug.

* Revert unintentional changes to parser.rl.

* Revert Lua changes for now.

* Revert the arena fuse changes for now.

* Added last_size to the arena representation.

* Fixed compile errors.

* Fixed compile error and changed benchmarks to do one allocation.
pull/13171/head
Joshua Haberman 5 years ago committed by GitHub
parent a0ae30bd16
commit 2b1e7dc1cc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
Changed files:
  1. tests/benchmark.cc    (14 lines changed)
  2. upb/def.c             (17 lines changed)
  3. upb/port_def.inc      (6 lines changed)
  4. upb/port_undef.inc    (4 lines changed)
  5. upb/upb.c             (176 lines changed)
  6. upb/upb.h             (32 lines changed)

@ -9,21 +9,23 @@ upb_strview descriptor = google_protobuf_descriptor_proto_upbdefinit.descriptor;
/* A buffer big enough to parse descriptor.proto without going to heap. */
char buf[65535];
static void BM_CreateArenaNoHeap(benchmark::State& state) {
static void BM_ArenaOneAlloc(benchmark::State& state) {
for (auto _ : state) {
upb_arena* arena = upb_arena_init(buf, sizeof(buf), NULL);
upb_arena* arena = upb_arena_new();
upb_arena_malloc(arena, 1);
upb_arena_free(arena);
}
}
BENCHMARK(BM_CreateArenaNoHeap);
BENCHMARK(BM_ArenaOneAlloc);
static void BM_CreateArena(benchmark::State& state) {
static void BM_ArenaInitialBlockOneAlloc(benchmark::State& state) {
for (auto _ : state) {
upb_arena* arena = upb_arena_new();
upb_arena* arena = upb_arena_init(buf, sizeof(buf), NULL);
upb_arena_malloc(arena, 1);
upb_arena_free(arena);
}
}
BENCHMARK(BM_CreateArena);
BENCHMARK(BM_ArenaInitialBlockOneAlloc);
static void BM_ParseDescriptorNoHeap(benchmark::State& state) {
size_t bytes = 0;

@ -871,16 +871,6 @@ void upb_oneof_iter_setdone(upb_oneof_iter *iter) {
/* Dynamic Layout Generation. *************************************************/
/* Returns true iff `val` is a power of two.
 * Fix: the original `(val & (val - 1)) == 0` also returned true for
 * val == 0 (0 & SIZE_MAX == 0), which is not a power of two and would let
 * align_up()'s assertion accept align == 0. Reject zero explicitly. */
static bool is_power_of_two(size_t val) {
  return val != 0 && (val & (val - 1)) == 0;
}
/* Align up to the given power of 2. */
static size_t align_up(size_t val, size_t align) {
UPB_ASSERT(is_power_of_two(align));
return (val + align - 1) & ~(align - 1);
}
/* Integer ceiling division: the smallest q such that q * d >= n. */
static size_t div_round_up(size_t n, size_t d) {
  const size_t biased = n + d - 1;
  return biased / d;
}
@ -922,7 +912,7 @@ static uint8_t upb_msg_fielddefsize(const upb_fielddef *f) {
static uint32_t upb_msglayout_place(upb_msglayout *l, size_t size) {
uint32_t ret;
l->size = align_up(l->size, size);
l->size = UPB_ALIGN_UP(l->size, size);
ret = l->size;
l->size += size;
return ret;
@ -977,7 +967,8 @@ static bool make_layout(const upb_symtab *symtab, const upb_msgdef *m) {
}
l->field_count = 2;
l->size = 2 * sizeof(upb_strview);align_up(l->size, 8);
l->size = 2 * sizeof(upb_strview);
l->size = UPB_ALIGN_UP(l->size, 8);
return true;
}
@ -1082,7 +1073,7 @@ static bool make_layout(const upb_symtab *symtab, const upb_msgdef *m) {
/* Size of the entire structure should be a multiple of its greatest
* alignment. TODO: track overall alignment for real? */
l->size = align_up(l->size, 8);
l->size = UPB_ALIGN_UP(l->size, 8);
return true;
}

@ -21,6 +21,7 @@
* This file is private and must not be included by users!
*/
#include <stdint.h>
#include <stddef.h>
#if UINTPTR_MAX == 0xffffffff
#define UPB_SIZE(size32, size64) size32
@ -53,6 +54,11 @@
#define UPB_INLINE static
#endif
/* Round `size` up / down to a multiple of `align`.  Implemented with integer
 * divide-and-multiply, so these are correct for any nonzero `align`, not just
 * powers of two.  NOTE(review): macro arguments are evaluated more than once;
 * do not pass expressions with side effects. */
#define UPB_ALIGN_UP(size, align) (((size) + (align) - 1) / (align) * (align))
#define UPB_ALIGN_DOWN(size, align) ((size) / (align) * (align))
/* Alignment applied to every arena allocation; 16 is chosen conservatively
 * in case anyone is using SSE (per the comment in upb.c). */
#define UPB_ALIGN_MALLOC(size) UPB_ALIGN_UP(size, 16)
/* Alignment requirement of `type`, via the classic offsetof trick
 * (requires <stddef.h>, included above). */
#define UPB_ALIGN_OF(type) offsetof (struct { char c; type member; }, member)
/* Hints to the compiler about likely/unlikely branches. */
#if defined (__GNUC__) || defined(__clang__)
#define UPB_LIKELY(x) __builtin_expect((x),1)

@ -6,6 +6,10 @@
#undef UPB_READ_ONEOF
#undef UPB_WRITE_ONEOF
#undef UPB_INLINE
#undef UPB_ALIGN_UP
#undef UPB_ALIGN_DOWN
#undef UPB_ALIGN_MALLOC
#undef UPB_ALIGN_OF
#undef UPB_FORCEINLINE
#undef UPB_NOINLINE
#undef UPB_NORETURN

@ -64,130 +64,120 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
/* Be conservative and choose 16 in case anyone is using SSE. */
/* Pre-refactor arena representation (this commit replaces it below).
 * NOTE(review): scraped from a diff view; this is the "before" version. */
struct upb_arena {
_upb_arena_head head;  /* Must be first: implements the upb_alloc interface. */
char *start;  /* Start of the current block's user data region. */
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */
upb_alloc *block_alloc;
size_t next_block_size;  /* Size to request for the next block. */
size_t max_block_size;   /* Cap on block-size doubling. */
/* Linked list of blocks. Points to an arena_block, defined in env.c */
void *block_head;
/* Cleanup entries. Pointer to a cleanup_ent, defined in env.c */
void *cleanup_head;
};
/* Header placed at the start of every arena block; user data follows the
 * (16-byte-aligned) header.  NOTE(review): this span comes from a diff view
 * with +/- markers stripped — `owned` belongs to the pre-refactor struct,
 * while `size`/`cleanups` are post-refactor; the real struct has one set of
 * fields, not both. */
typedef struct mem_block {
struct mem_block *next;  /* Next block in the arena's block list/freelist. */
bool owned;  /* (pre-refactor) true if the arena must free this block. */
uint32_t size;  /* (post-refactor) total block size, header included. */
uint32_t cleanups;  /* (post-refactor) count of cleanup_ent records packed at
                     * the end of this block (see upb_arena_free). */
/* Data follows. */
} mem_block;
/* One registered cleanup callback: cleanup(ud) runs at upb_arena_free(). */
typedef struct cleanup_ent {
struct cleanup_ent *next;  /* (pre-refactor) linked-list link; the refactor
                            * packs entries contiguously instead. */
upb_cleanup_func *cleanup;  /* Callback to invoke at arena destruction. */
void *ud;  /* Opaque user data passed to the callback. */
} cleanup_ent;
static void upb_arena_addblock(upb_arena *a, void *ptr, size_t size,
bool owned) {
/* Post-refactor arena representation: the cleanup list now lives inside
 * regular blocks (counter in mem_block, entries packed at the block's end). */
struct upb_arena {
_upb_arena_head head;  /* Must be first: implements the upb_alloc interface. */
uint32_t *cleanups;  /* Points at the current block's cleanup counter, or
                      * NULL when the arena has no cleanup-capable block. */
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */
upb_alloc *block_alloc;
size_t last_size;  /* Size of the most recently added block; the next
                    * allocation doubles this (see upb_arena_allocblock). */
/* Linked list of blocks to free/cleanup. */
mem_block *freelist;
};
static const size_t memblock_reserve = UPB_ALIGN_UP(sizeof(mem_block), 16);
static void upb_arena_addblock(upb_arena *a, void *ptr, size_t size) {
mem_block *block = ptr;
block->next = a->block_head;
block->owned = owned;
block->next = a->freelist;
block->size = size;
block->cleanups = 0;
a->freelist = block;
a->last_size = size;
a->block_head = block;
a->start = (char*)block + _upb_arena_alignup(sizeof(mem_block));
a->head.ptr = a->start;
a->head.end = (char*)block + size;
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->cleanups = &block->cleanups;
/* TODO(haberman): ASAN poison. */
}
static mem_block *upb_arena_allocblock(upb_arena *a, size_t size) {
size_t block_size = UPB_MAX(size, a->next_block_size) + sizeof(mem_block);
static bool upb_arena_allocblock(upb_arena *a, size_t size) {
size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
mem_block *block = upb_malloc(a->block_alloc, block_size);
if (!block) {
return NULL;
}
upb_arena_addblock(a, block, block_size, true);
a->next_block_size = UPB_MIN(block_size * 2, a->max_block_size);
if (!block) return false;
upb_arena_addblock(a, block, block_size);
return true;
}
return block;
/* Returns true if the arena's current block has at least `size` bytes of
 * unused space between ptr and end. */
static bool arena_has(upb_arena *a, size_t size) {
  _upb_arena_head *h = (_upb_arena_head*)a;
  size_t avail = (size_t)(h->end - h->ptr);
  return avail >= size;
}
void *_upb_arena_slowmalloc(upb_arena *a, size_t size) {
mem_block *block = upb_arena_allocblock(a, size);
if (!block) return NULL; /* Out of memory. */
if (!upb_arena_allocblock(a, size)) return NULL; /* Out of memory. */
UPB_ASSERT(arena_has(a, size));
return upb_arena_malloc(a, size);
}
static void *upb_arena_doalloc(upb_alloc *alloc, void *ptr, size_t oldsize,
size_t size) {
upb_arena *a = (upb_arena*)alloc; /* upb_alloc is initial member. */
void *ret;
if (size == 0) {
return NULL; /* We are an arena, don't need individual frees. */
}
return upb_arena_realloc(a, ptr, oldsize, size);
}
ret = upb_arena_malloc(a, size);
if (!ret) return NULL;
/* Public Arena API ***********************************************************/
/* TODO(haberman): special-case if this is a realloc of the last alloc? */
upb_arena *arena_initslow(void *mem, size_t n, upb_alloc *alloc) {
const size_t first_block_overhead = sizeof(upb_arena) + memblock_reserve;
upb_arena *a;
if (oldsize > 0) {
memcpy(ret, ptr, oldsize); /* Preserve existing data. */
/* We need to malloc the initial block. */
n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
return NULL;
}
/* TODO(haberman): ASAN unpoison. */
return ret;
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
n -= sizeof(*a);
/* Public Arena API ***********************************************************/
a->head.alloc.func = &upb_arena_doalloc;
a->block_alloc = alloc;
a->freelist = NULL;
upb_arena_addblock(a, mem, n);
#define upb_alignof(type) offsetof (struct { char c; type member; }, member)
return a;
}
upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
const size_t first_block_overhead = sizeof(upb_arena) + sizeof(mem_block);
upb_arena *a;
bool owned = false;
/* Round block size down to alignof(*a) since we will allocate the arena
* itself at the end. */
n &= ~(upb_alignof(upb_arena) - 1);
if (n < first_block_overhead) {
/* We need to malloc the initial block. */
n = first_block_overhead + 256;
owned = true;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
return NULL;
}
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_arena));
if (UPB_UNLIKELY(n < sizeof(upb_arena))) {
return arena_initslow(mem, n, alloc);
}
a = (void*)((char*)mem + n - sizeof(*a));
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
n -= sizeof(*a);
a->head.alloc.func = &upb_arena_doalloc;
a->head.ptr = NULL;
a->head.end = NULL;
a->start = NULL;
a->block_alloc = &upb_alloc_global;
a->next_block_size = 256;
a->max_block_size = 16384;
a->cleanup_head = NULL;
a->block_head = NULL;
a->block_alloc = alloc;
upb_arena_addblock(a, mem, n, owned);
a->last_size = 128;
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n, char);
a->freelist = NULL;
a->cleanups = NULL;
return a;
}
@ -195,38 +185,40 @@ upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
#undef upb_alignof
void upb_arena_free(upb_arena *a) {
cleanup_ent *ent = a->cleanup_head;
mem_block *block = a->block_head;
while (ent) {
ent->cleanup(ent->ud);
ent = ent->next;
}
mem_block *block = a->freelist;
/* Must do this after running cleanup functions, because this will delete
* the memory we store our cleanup entries in! */
while (block) {
/* Load first since we are deleting block. */
mem_block *next = block->next;
if (block->owned) {
upb_free(a->block_alloc, block);
if (block->cleanups > 0) {
cleanup_ent *end = UPB_PTR_AT(block, block->size, void);
cleanup_ent *ptr = end - block->cleanups;
for (; ptr < end; ptr++) {
ptr->cleanup(ptr->ud);
}
}
upb_free(a->block_alloc, block);
block = next;
}
}
bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
cleanup_ent *ent = upb_malloc(&a->head.alloc, sizeof(cleanup_ent));
if (!ent) {
return false; /* Out of memory. */
cleanup_ent *ent;
if (!a->cleanups || !arena_has(a, sizeof(cleanup_ent))) {
if (!upb_arena_allocblock(a, 128)) return false; /* Out of memory. */
UPB_ASSERT(arena_has(a, sizeof(cleanup_ent)));
}
a->head.end -= sizeof(cleanup_ent);
ent = (cleanup_ent*)a->head.end;
(*a->cleanups)++;
ent->cleanup = func;
ent->ud = ud;
ent->next = a->cleanup_head;
a->cleanup_head = ent;
return true;
}

@ -142,17 +142,13 @@ typedef struct upb_arena upb_arena;
typedef struct {
/* We implement the allocator interface.
* This must be the first member of upb_arena! */
* This must be the first member of upb_arena!
* TODO(haberman): remove once handlers are gone. */
upb_alloc alloc;
char *ptr, *end;
} _upb_arena_head;
/* Rounds `size` up to a multiple of 16, the arena's allocation granularity
 * (16 is a power of two, so the mask form is equivalent to the original
 * divide-and-multiply). */
UPB_INLINE size_t _upb_arena_alignup(size_t size) {
  const size_t maxalign = 16;
  const size_t mask = maxalign - 1;
  return (size + mask) & ~mask;
}
/* Creates an arena from the given initial block (if any -- n may be 0).
* Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
* is a fixed-size arena and cannot grow. */
@ -165,23 +161,27 @@ UPB_INLINE upb_alloc *upb_arena_alloc(upb_arena *a) { return (upb_alloc*)a; }
UPB_INLINE void *upb_arena_malloc(upb_arena *a, size_t size) {
_upb_arena_head *h = (_upb_arena_head*)a;
size = _upb_arena_alignup(size);
if (UPB_LIKELY((size_t)(h->end - h->ptr) >= size)) {
void* ret = h->ptr;
h->ptr += size;
return ret;
} else {
void* ret;
size = UPB_ALIGN_MALLOC(size);
if (UPB_UNLIKELY((size_t)(h->end - h->ptr) < size)) {
return _upb_arena_slowmalloc(a, size);
}
ret = h->ptr;
h->ptr += size;
return ret;
}
UPB_INLINE void *upb_arena_realloc(upb_arena *a, void *ptr, size_t oldsize,
size_t size) {
if (oldsize == 0) {
return upb_arena_malloc(a, size);
} else {
return upb_realloc(upb_arena_alloc(a), ptr, oldsize, size);
void *ret = upb_arena_malloc(a, size);
if (ret && oldsize > 0) {
memcpy(ret, ptr, oldsize);
}
return ret;
}
UPB_INLINE upb_arena *upb_arena_new(void) {

Loading…
Cancel
Save