Merge pull request #378 from fowles/master

Allow arena fuse to fail, and only fuse when it is safe
pull/13171/head
Matt Fowles Kulukundis 4 years ago committed by GitHub
commit b053fa6991
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 31
      tests/test_generated_code.c
  2. 4
      upb/decode.c
  3. 37
      upb/upb.c
  4. 2
      upb/upb.h
  5. 5
      upb/upb.int.h

@ -427,7 +427,7 @@ void test_arena_fuse(void) {
upb_arena_addcleanup(arena1, &i1, decrement_int);
upb_arena_addcleanup(arena2, &i2, decrement_int);
upb_arena_fuse(arena1, arena2);
ASSERT(upb_arena_fuse(arena1, arena2));
upb_arena_addcleanup(arena1, &i3, decrement_int);
upb_arena_addcleanup(arena2, &i4, decrement_int);
@ -444,6 +444,34 @@ void test_arena_fuse(void) {
ASSERT(i4 == 4);
}
/* Pass-through allocator for testing: forwards every request to the global
 * allocator but gives arenas a distinct upb_alloc identity, so fusing across
 * different allocators can be exercised. */
static void *test_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
                            size_t size) {
  upb_alloc *fallback = &upb_alloc_global;
  return fallback->func(alloc, ptr, oldsize, size);
}

upb_alloc test_alloc = {&test_allocfunc};
/* Verifies the restrictions on upb_arena_fuse(): arenas backed by a
 * caller-provided initial block cannot be fused (their memory cannot be
 * lifetime-extended), and neither can arenas using different block
 * allocators.  Fusing an arena with itself must always succeed. */
void test_arena_fuse_with_initial_block(void) {
  char buf1[1024];
  char buf2[1024];
  upb_arena *arenas[] = {upb_arena_init(buf1, 1024, &upb_alloc_global),
                         upb_arena_init(buf2, 1024, &upb_alloc_global),
                         upb_arena_init(NULL, 0, &test_alloc),
                         upb_arena_init(NULL, 0, &upb_alloc_global)};
  const int n = (int)(sizeof(arenas) / sizeof(arenas[0]));
  for (int a = 0; a < n; ++a) {
    for (int b = 0; b < n; ++b) {
      /* Every distinct pair here is blocked either by an initial block or by
       * mismatched allocators; only the self-pair is fusable. */
      const int expect_ok = (a == b);
      if (expect_ok) {
        ASSERT(upb_arena_fuse(arenas[a], arenas[b]));
      } else {
        ASSERT(!upb_arena_fuse(arenas[a], arenas[b]));
      }
    }
  }
  for (int a = 0; a < n; ++a) {
    upb_arena_free(arenas[a]);
  }
}
void test_arena_decode(void) {
// Tests against a bug that previously existed when passing an arena to
// upb_decode().
@ -487,6 +515,7 @@ int run_tests(int argc, char *argv[]) {
test_null_decode_buf();
test_status_truncation();
test_arena_fuse();
test_arena_fuse_with_initial_block();
test_arena_decode();
return 0;
}

@ -699,7 +699,7 @@ bool _upb_decode(const char *buf, size_t size, void *msg,
state.end_group = DECODE_NOGROUP;
state.arena.head = arena->head;
state.arena.last_size = arena->last_size;
state.arena.cleanups = arena->cleanups;
state.arena.cleanup_metadata = arena->cleanup_metadata;
state.arena.parent = arena;
if (UPB_UNLIKELY(UPB_SETJMP(state.err))) {
@ -710,7 +710,7 @@ bool _upb_decode(const char *buf, size_t size, void *msg,
arena->head.ptr = state.arena.head.ptr;
arena->head.end = state.arena.head.end;
arena->cleanups = state.arena.cleanups;
arena->cleanup_metadata = state.arena.cleanup_metadata;
return ok;
}

@ -67,6 +67,19 @@ static void *upb_global_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
}
}
/* Extracts the cleanup-counter pointer from the packed metadata word by
 * masking off the low tag bit (which records "has initial block"). */
static uint32_t *upb_cleanup_pointer(uintptr_t cleanup_metadata) {
  const uintptr_t ptr_mask = ~(uintptr_t)1;
  return (uint32_t *)(cleanup_metadata & ptr_mask);
}
/* Reports whether the metadata word's low tag bit is set, i.e. whether this
 * arena was created over an unowned, caller-supplied initial block. */
static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
  return (cleanup_metadata & (uintptr_t)1) != 0;
}
/* Packs the cleanup-counter pointer and the "has initial block" flag into one
 * word.  The flag lives in bit 0, which is guaranteed free because uint32_t*
 * values are at least 4-byte aligned. */
static uintptr_t upb_cleanup_metadata(uint32_t *cleanup,
                                      bool has_initial_block) {
  uintptr_t packed = (uintptr_t)cleanup;
  if (has_initial_block) packed |= 1;
  return packed;
}
upb_alloc upb_alloc_global = {&upb_global_allocfunc};
/* upb_arena ******************************************************************/
@ -112,7 +125,8 @@ static void upb_arena_addblock(upb_arena *a, upb_arena *root, void *ptr,
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->cleanups = &block->cleanups;
a->cleanup_metadata = upb_cleanup_metadata(
&block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}
@ -160,6 +174,7 @@ upb_arena *arena_initslow(void *mem, size_t n, upb_alloc *alloc) {
a->refcount = 1;
a->freelist = NULL;
a->freelist_tail = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, false);
upb_arena_addblock(a, a, mem, n);
@ -187,7 +202,7 @@ upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->freelist = NULL;
a->cleanups = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, true);
return a;
}
@ -222,15 +237,17 @@ void upb_arena_free(upb_arena *a) {
bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
cleanup_ent *ent;
uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);
if (!a->cleanups || _upb_arenahas(a) < sizeof(cleanup_ent)) {
if (!cleanups || _upb_arenahas(a) < sizeof(cleanup_ent)) {
if (!upb_arena_allocblock(a, 128)) return false; /* Out of memory. */
UPB_ASSERT(_upb_arenahas(a) >= sizeof(cleanup_ent));
cleanups = upb_cleanup_pointer(a->cleanup_metadata);
}
a->head.end -= sizeof(cleanup_ent);
ent = (cleanup_ent*)a->head.end;
(*a->cleanups)++;
(*cleanups)++;
UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));
ent->cleanup = func;
@ -239,11 +256,18 @@ bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
return true;
}
void upb_arena_fuse(upb_arena *a1, upb_arena *a2) {
bool upb_arena_fuse(upb_arena *a1, upb_arena *a2) {
upb_arena *r1 = arena_findroot(a1);
upb_arena *r2 = arena_findroot(a2);
if (r1 == r2) return; /* Already fused. */
if (r1 == r2) return true; /* Already fused. */
/* Do not fuse initial blocks since we cannot lifetime extend them. */
if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;
/* Only allow fuse with a common allocator */
if (r1->block_alloc != r2->block_alloc) return false;
/* We want to join the smaller tree to the larger tree.
* So swap first if they are backwards. */
@ -261,4 +285,5 @@ void upb_arena_fuse(upb_arena *a1, upb_arena *a2) {
r1->freelist = r2->freelist;
}
r2->parent = r1;
return true;
}

@ -159,7 +159,7 @@ typedef struct {
upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc);
void upb_arena_free(upb_arena *a);
bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func);
void upb_arena_fuse(upb_arena *a, upb_arena *b);
bool upb_arena_fuse(upb_arena *a, upb_arena *b);
void *_upb_arena_slowmalloc(upb_arena *a, size_t size);
UPB_INLINE upb_alloc *upb_arena_alloc(upb_arena *a) { return (upb_alloc*)a; }

@ -9,7 +9,10 @@ typedef struct mem_block mem_block;
struct upb_arena {
_upb_arena_head head;
uint32_t *cleanups;
/* Stores cleanup metadata for this arena.
* - a pointer to the current cleanup counter.
* - a boolean indicating if there is an unowned initial block. */
uintptr_t cleanup_metadata;
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */

Loading…
Cancel
Save