Protocol Buffers - Google's data interchange format (grpc dependency) https://developers.google.com/protocol-buffers/

#include "upb/upb.h"
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "upb/port_def.inc"
/* upb_status *****************************************************************/

void upb_status_clear(upb_status *status) {
  if (!status) return;
  status->ok = true;
  status->msg[0] = '\0';
}

bool upb_ok(const upb_status *status) { return status->ok; }

const char *upb_status_errmsg(const upb_status *status) { return status->msg; }

void upb_status_seterrmsg(upb_status *status, const char *msg) {
  if (!status) return;
  status->ok = false;
  strncpy(status->msg, msg, UPB_STATUS_MAX_MESSAGE - 1);
  status->msg[UPB_STATUS_MAX_MESSAGE - 1] = '\0';
}

void upb_status_seterrf(upb_status *status, const char *fmt, ...) {
  va_list args;
  va_start(args, fmt);
  upb_status_vseterrf(status, fmt, args);
  va_end(args);
}

void upb_status_vseterrf(upb_status *status, const char *fmt, va_list args) {
  if (!status) return;
  status->ok = false;
  _upb_vsnprintf(status->msg, sizeof(status->msg), fmt, args);
  status->msg[UPB_STATUS_MAX_MESSAGE - 1] = '\0';
}

void upb_status_vappenderrf(upb_status *status, const char *fmt, va_list args) {
  size_t len;
  if (!status) return;
  status->ok = false;
  len = strlen(status->msg);
  _upb_vsnprintf(status->msg + len, sizeof(status->msg) - len, fmt, args);
  status->msg[UPB_STATUS_MAX_MESSAGE - 1] = '\0';
}
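
/* Illustrative sketch (not part of this file): how a caller might report and
 * inspect an error through the upb_status API above. Assumes the declarations
 * in upb/upb.h; the field name "foo" is made up for the example.
 *
 *   upb_status status;
 *   upb_status_clear(&status);
 *   upb_status_seterrf(&status, "unknown field: %s", "foo");
 *   if (!upb_ok(&status)) {
 *     fprintf(stderr, "parse failed: %s\n", upb_status_errmsg(&status));
 *   }
 */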
/* upb_alloc ******************************************************************/

static void *upb_global_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
                                  size_t size) {
  UPB_UNUSED(alloc);
  UPB_UNUSED(oldsize);
  if (size == 0) {
    free(ptr);
    return NULL;
  } else {
    return realloc(ptr, size);
  }
}

upb_alloc upb_alloc_global = {&upb_global_allocfunc};
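
/* Illustrative sketch (not part of this file): a custom upb_alloc follows the
 * same contract as upb_global_allocfunc above -- size == 0 frees, otherwise it
 * (re)allocates. The counting allocator below is a hypothetical example; the
 * names counting_alloc, counting_allocfunc, live_bytes and my_alloc are made
 * up, only the upb_alloc struct comes from upb/upb.h.
 *
 *   typedef struct {
 *     upb_alloc alloc;    // must be the first member so the cast works
 *     size_t live_bytes;  // example bookkeeping
 *   } counting_alloc;
 *
 *   static void *counting_allocfunc(upb_alloc *alloc, void *ptr,
 *                                   size_t oldsize, size_t size) {
 *     counting_alloc *c = (counting_alloc *)alloc;
 *     c->live_bytes += size;
 *     c->live_bytes -= oldsize;
 *     if (size == 0) {
 *       free(ptr);
 *       return NULL;
 *     }
 *     return realloc(ptr, size);
 *   }
 *
 *   counting_alloc my_alloc = {{&counting_allocfunc}, 0};
 *   // Pass &my_alloc.alloc wherever a upb_alloc* is expected, e.g. to
 *   // upb_arena_init() below.
 */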
/* upb_arena ******************************************************************/

/* Be conservative and choose 16 in case anyone is using SSE. */

typedef struct mem_block {
  struct mem_block *next;
  uint32_t size;
  uint32_t cleanups;
  /* Data follows. */
} mem_block;

typedef struct cleanup_ent {
  upb_cleanup_func *cleanup;
  void *ud;
} cleanup_ent;

struct upb_arena {
  _upb_arena_head head;
  uint32_t *cleanups;

  /* Allocator to allocate arena blocks. We are responsible for freeing these
   * when we are destroyed. */
  upb_alloc *block_alloc;
  uint32_t last_size;

  /* When multiple arenas are fused together, each arena points to a parent
   * arena (root points to itself). The root tracks how many live arenas
   * reference it. */
  uint32_t refcount; /* Only used when a->parent == a */
  struct upb_arena *parent;

  /* Linked list of blocks to free/cleanup. */
  mem_block *freelist, *freelist_tail;
};

static const size_t memblock_reserve = UPB_ALIGN_UP(sizeof(mem_block), 16);
static void upb_arena_addblock(upb_arena *a, void *ptr, size_t size) {
  mem_block *block = ptr;

  block->next = a->freelist;
  block->size = size;
  block->cleanups = 0;
  a->freelist = block;
  a->last_size = size;
  if (!a->freelist_tail) a->freelist_tail = block;

  a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
  a->head.end = UPB_PTR_AT(block, size, char);
  a->cleanups = &block->cleanups;

  /* TODO(haberman): ASAN poison. */
}

static bool upb_arena_allocblock(upb_arena *a, size_t size) {
  size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
  mem_block *block = upb_malloc(a->block_alloc, block_size);

  if (!block) return false;
  upb_arena_addblock(a, block, block_size);
  return true;
}

static bool arena_has(upb_arena *a, size_t size) {
  _upb_arena_head *h = (_upb_arena_head*)a;
  return (size_t)(h->end - h->ptr) >= size;
}

/* Out-of-line fallback for the inline upb_arena_malloc() fast path in upb.h:
 * grab a fresh block large enough for the request, then retry. */
void *_upb_arena_slowmalloc(upb_arena *a, size_t size) {
  if (!upb_arena_allocblock(a, size)) return NULL;  /* Out of memory. */
  UPB_ASSERT(arena_has(a, size));
  return upb_arena_malloc(a, size);
}
static void *upb_arena_doalloc(upb_alloc *alloc, void *ptr, size_t oldsize,
                               size_t size) {
  upb_arena *a = (upb_arena*)alloc;  /* upb_alloc is initial member. */
  return upb_arena_realloc(a, ptr, oldsize, size);
}

static upb_arena *arena_findroot(upb_arena *a) {
  /* Path splitting keeps time complexity down, see:
   *   https://en.wikipedia.org/wiki/Disjoint-set_data_structure */
  while (a->parent != a) {
    upb_arena *next = a->parent;
    a->parent = next->parent;
    a = next;
  }
  return a;
}
/* Public Arena API ***********************************************************/

upb_arena *arena_initslow(void *mem, size_t n, upb_alloc *alloc) {
  const size_t first_block_overhead = sizeof(upb_arena) + memblock_reserve;
  upb_arena *a;

  /* We need to malloc the initial block. */
  n = first_block_overhead + 256;
  if (!alloc || !(mem = upb_malloc(alloc, n))) {
    return NULL;
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
  n -= sizeof(*a);

  a->head.alloc.func = &upb_arena_doalloc;
  a->block_alloc = alloc;
  a->parent = a;
  a->refcount = 1;
  a->freelist = NULL;
  a->freelist_tail = NULL;

  upb_arena_addblock(a, mem, n);

  return a;
}
upb_arena *upb_arena_init(void *mem, size_t n, upb_alloc *alloc) {
  upb_arena *a;

  /* Round block size down to alignof(*a) since we will allocate the arena
   * itself at the end. */
  n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_arena));

  if (UPB_UNLIKELY(n < sizeof(upb_arena))) {
    return arena_initslow(mem, n, alloc);
  }

  a = UPB_PTR_AT(mem, n - sizeof(*a), upb_arena);
  n -= sizeof(*a);

  a->head.alloc.func = &upb_arena_doalloc;
  a->block_alloc = alloc;
  a->parent = a;
  a->refcount = 1;
  a->last_size = 128;
  a->head.ptr = mem;
  a->head.end = UPB_PTR_AT(mem, n, char);
  a->freelist = NULL;
  a->freelist_tail = NULL;  /* Must be cleared so addblock/fuse see an empty list. */
  a->cleanups = NULL;

  return a;
}
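
/* Illustrative sketch (not part of this file): creating an arena over a
 * caller-provided initial block, allocating from it, and freeing it. Assumes
 * upb_arena_malloc() and the other declarations in upb/upb.h; the buffer size
 * and the request sizes are arbitrary example values.
 *
 *   char buf[4096];
 *   upb_arena *arena = upb_arena_init(buf, sizeof(buf), &upb_alloc_global);
 *   void *p = upb_arena_malloc(arena, 512);   // carved out of buf
 *   void *q = upb_arena_malloc(arena, 8192);  // too big for buf: takes the
 *                                             // slow path, which mallocs a
 *                                             // new block via upb_alloc_global
 *   (void)p; (void)q;
 *   upb_arena_free(arena);  // frees heap blocks and runs cleanups;
 *                           // buf itself still belongs to the caller
 */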
static void arena_dofree(upb_arena *a) {
  mem_block *block = a->freelist;
  UPB_ASSERT(a->parent == a);
  UPB_ASSERT(a->refcount == 0);

  while (block) {
    /* Load first since we are deleting block. */
    mem_block *next = block->next;

    if (block->cleanups > 0) {
      cleanup_ent *end = UPB_PTR_AT(block, block->size, void);
      cleanup_ent *ptr = end - block->cleanups;

      for (; ptr < end; ptr++) {
        ptr->cleanup(ptr->ud);
      }
    }

    upb_free(a->block_alloc, block);
    block = next;
  }
}

void upb_arena_free(upb_arena *a) {
  a = arena_findroot(a);
  if (--a->refcount == 0) arena_dofree(a);
}
bool upb_arena_addcleanup(upb_arena *a, void *ud, upb_cleanup_func *func) {
  cleanup_ent *ent;

  /* Cleanup entries are carved off the end of the current block, growing
   * downward; block->cleanups counts how many entries the block holds. */
  if (!a->cleanups || !arena_has(a, sizeof(cleanup_ent))) {
    if (!upb_arena_allocblock(a, 128)) return false;  /* Out of memory. */
    UPB_ASSERT(arena_has(a, sizeof(cleanup_ent)));
  }

  a->head.end -= sizeof(cleanup_ent);
  ent = (cleanup_ent*)a->head.end;
  (*a->cleanups)++;

  ent->cleanup = func;
  ent->ud = ud;

  return true;
}
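
/* Illustrative sketch (not part of this file): registering a cleanup so an
 * external resource is released when the arena (or any arena fused with it)
 * is finally freed. close_file_cb and fp are hypothetical names used only for
 * this example; upb_cleanup_func takes a single void* argument.
 *
 *   static void close_file_cb(void *ud) { fclose((FILE*)ud); }
 *
 *   FILE *fp = fopen("data.bin", "rb");
 *   if (fp && !upb_arena_addcleanup(arena, fp, &close_file_cb)) {
 *     fclose(fp);  // arena was out of memory; clean up manually
 *   }
 *   // On the final upb_arena_free(), arena_dofree() above calls
 *   // close_file_cb(fp).
 */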
void upb_arena_fuse(upb_arena *a1, upb_arena *a2) {
  upb_arena *r1 = arena_findroot(a1);
  upb_arena *r2 = arena_findroot(a2);

  if (r1 == r2) return;  /* Already fused. */

  /* We want to join the smaller tree to the larger tree.
   * So swap first if they are backwards. */
  if (r1->refcount < r2->refcount) {
    upb_arena *tmp = r1;
    r1 = r2;
    r2 = tmp;
  }

  /* r1 takes over r2's freelist and refcount. */
  r1->refcount += r2->refcount;
  if (r2->freelist_tail) {
    UPB_ASSERT(r2->freelist_tail->next == NULL);
    r2->freelist_tail->next = r1->freelist;
    r1->freelist = r2->freelist;
  }
  r2->parent = r1;
}
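
/* Illustrative sketch (not part of this file): after fusing, the two arenas
 * share a single lifetime -- each one must still be freed, and the underlying
 * blocks are released only when the last reference is dropped. Assumes
 * upb_arena_new() from upb/upb.h (a thin wrapper over
 * upb_arena_init(NULL, 0, &upb_alloc_global)).
 *
 *   upb_arena *a = upb_arena_new();
 *   upb_arena *b = upb_arena_new();
 *   upb_arena_fuse(a, b);   // objects in a may now safely point into b
 *   upb_arena_free(a);      // only decrements the shared refcount
 *   upb_arena_free(b);      // last reference: blocks freed, cleanups run
 */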