Update UPB dependency

PiperOrigin-RevId: 524423191
pull/12470/head
Deanna Garcia authored 2 years ago, committed by Copybara-Service
parent 84a3b594c7
commit c7d979dbf4
  1. php/ext/google/protobuf/php-upb.c (431)
  2. php/ext/google/protobuf/php-upb.h (259)
  3. protobuf_deps.bzl (4)
  4. ruby/ext/google/protobuf_c/message.c (4)
  5. ruby/ext/google/protobuf_c/ruby-upb.c (431)
  6. ruby/ext/google/protobuf_c/ruby-upb.h (259)

@ -168,14 +168,16 @@
#ifdef __GNUC__
#define UPB_USE_C11_ATOMICS
#define UPB_ATOMIC _Atomic
#define UPB_ATOMIC(T) _Atomic(T)
#else
#define UPB_ATOMIC
#define UPB_ATOMIC(T) T
#endif
/* UPB_PTRADD(ptr, ofs): add pointer while avoiding "NULL + 0" UB */
#define UPB_PTRADD(ptr, ofs) ((ofs) ? (ptr) + (ofs) : (ptr))
#define UPB_PRIVATE(x) x##_dont_copy_me__upb_internal_use_only
/* Configure whether fasttable is switched on or not. *************************/
#ifdef __has_attribute
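The switch from an object-like UPB_ATOMIC macro to the function-like UPB_ATOMIC(T) lets a struct field name its atomic type explicitly while still degrading to a plain field when C11 atomics are unavailable, and UPB_PRIVATE(x) mangles internal member names so user code cannot rely on them. A minimal expansion sketch, assuming the two macros above are in scope (the struct here is hypothetical, not part of the diff):

#include <stdint.h>

struct example_node {
  // With C11 atomics this is _Atomic(struct example_node*) next;
  // without them it is plain struct example_node* next.
  UPB_ATOMIC(struct example_node*) next;
  // Expands to submsg_index_dont_copy_me__upb_internal_use_only.
  uint16_t UPB_PRIVATE(submsg_index);
};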
@ -5504,39 +5506,28 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
// Must be last.
static uint32_t* upb_cleanup_pointer(uintptr_t cleanup_metadata) {
return (uint32_t*)(cleanup_metadata & ~0x1);
}
static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
return cleanup_metadata & 0x1;
}
static uintptr_t upb_cleanup_metadata(uint32_t* cleanup,
bool has_initial_block) {
return (uintptr_t)cleanup | has_initial_block;
}
struct _upb_MemBlock {
struct _upb_MemBlock* next;
// Atomic only for the benefit of SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) next;
uint32_t size;
uint32_t cleanups;
// Data follows.
};
typedef struct cleanup_ent {
upb_CleanupFunc* cleanup;
void* ud;
} cleanup_ent;
static const size_t memblock_reserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
typedef struct _upb_ArenaRoot {
upb_Arena* root;
uintptr_t tagged_count;
} _upb_ArenaRoot;
static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
uintptr_t next_poc = upb_Atomic_LoadAcquire(&next->parent_or_count);
UPB_ASSERT(a != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
if (_upb_Arena_IsTaggedPointer(next_poc)) {
// To keep complexity down, we lazily collapse levels of the tree. This
@ -5558,23 +5549,27 @@ static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
upb_Atomic_StoreRelaxed(&a->parent_or_count, next_poc);
UPB_ASSERT(a != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&a->parent_or_count, next_poc, memory_order_relaxed);
}
a = next;
poc = next_poc;
}
return a;
return (_upb_ArenaRoot){.root = a, .tagged_count = poc};
}
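For readers less familiar with the pattern, the loop above is union-find with lazy path compression over a tagged word: an odd value is a left-shifted refcount and marks the root, an even value is a parent pointer. A simplified, non-atomic sketch of the same idea (hypothetical Node type, not upb code):

#include <stdint.h>

typedef struct Node {
  uintptr_t parent_or_count;  // odd: (refcount << 1) | 1 at the root; even: parent ptr
} Node;

static Node* find_root(Node* n) {
  uintptr_t v = n->parent_or_count;
  while ((v & 1) == 0) {                   // still a parent pointer
    Node* parent = (Node*)v;
    uintptr_t pv = parent->parent_or_count;
    if ((pv & 1) == 0) {
      // Path compression: point n at its grandparent so later walks are shorter.
      n->parent_or_count = pv;
    }
    n = parent;
    v = pv;
  }
  return n;                                // v now holds the root's tagged refcount
}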
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
arena = _upb_Arena_FindRoot(arena);
arena = _upb_Arena_FindRoot(arena).root;
size_t memsize = 0;
_upb_MemBlock* block = arena->freelist;
while (block) {
memsize += sizeof(_upb_MemBlock) + block->size;
block = block->next;
while (arena != NULL) {
_upb_MemBlock* block =
upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
while (block != NULL) {
memsize += sizeof(_upb_MemBlock) + block->size;
block = upb_Atomic_Load(&block->next, memory_order_relaxed);
}
arena = upb_Atomic_Load(&arena->next, memory_order_relaxed);
}
return memsize;
@ -5583,58 +5578,55 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
}
static void upb_Arena_addblock(upb_Arena* a, upb_Arena* root, void* ptr,
size_t size) {
static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
/* The block is for arena |a|, but should appear in the freelist of |root|. */
block->next = root->freelist;
// Insert into linked list.
block->size = (uint32_t)size;
block->cleanups = 0;
root->freelist = block;
a->last_size = block->size;
if (!root->freelist_tail) root->freelist_tail = block;
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->cleanup_metadata = upb_cleanup_metadata(
&block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}
static bool upb_Arena_Allocblock(upb_Arena* a, size_t size) {
upb_Arena* root = _upb_Arena_FindRoot(a);
size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(root->block_alloc, block_size);
static bool upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(upb_Arena_BlockAlloc(a), block_size);
if (!block) return false;
upb_Arena_addblock(a, root, block, block_size);
upb_Arena_AddBlock(a, block, block_size);
return true;
}
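A quick worked example of the sizing logic in upb_Arena_AllocBlock above, assuming memblock_reserve is 16 bytes on this platform (the real value is UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN), so it may differ): with no blocks yet, last_size defaults to 128, so a 40-byte request allocates max(40, 2 * 128) + 16 = 272 bytes, and the next block doubles from the 272-byte block's recorded size. Deriving last_size from the newest block is what allows the old last_size member to be dropped.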
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
if (!upb_Arena_Allocblock(a, size)) return NULL; /* Out of memory. */
if (!upb_Arena_AllocBlock(a, size)) return NULL; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= size);
return upb_Arena_Malloc(a, size);
}
/* Public Arena API ***********************************************************/
static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
upb_Arena* a;
/* We need to malloc the initial block. */
n = first_block_overhead + 256;
char* mem;
size_t n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
return NULL;
}
@ -5642,13 +5634,13 @@ static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a->block_alloc = alloc;
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
a->freelist = NULL;
a->freelist_tail = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, false);
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
upb_Arena_addblock(a, a, mem, n);
upb_Arena_AddBlock(a, mem, n);
return a;
}
@ -5669,51 +5661,48 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
return arena_initslow(mem, n, alloc);
return upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
a->block_alloc = alloc;
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
a->last_size = UPB_MAX(128, n);
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->freelist = NULL;
a->freelist_tail = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, true);
return a;
}
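As context for the two initialization paths above, a hedged usage sketch: passing a user-supplied buffer takes the fast path and tags the arena as having an initial block (which, per the fuse changes below, also makes it un-fusable), while passing no buffer falls through to upb_Arena_InitSlow(), which mallocs the first block:

char backing[4096];
upb_Arena* fast = upb_Arena_Init(backing, sizeof(backing), &upb_alloc_global);
upb_Arena* slow = upb_Arena_Init(NULL, 0, &upb_alloc_global);  // slow path
/* ... allocate from the arenas ... */
upb_Arena_Free(fast);
upb_Arena_Free(slow);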
static void arena_dofree(upb_Arena* a) {
_upb_MemBlock* block = a->freelist;
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
while (block) {
/* Load first since we are deleting block. */
_upb_MemBlock* next = block->next;
if (block->cleanups > 0) {
cleanup_ent* end = UPB_PTR_AT(block, block->size, void);
cleanup_ent* ptr = end - block->cleanups;
for (; ptr < end; ptr++) {
ptr->cleanup(ptr->ud);
}
while (a != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
_upb_MemBlock* next_block =
upb_Atomic_Load(&block->next, memory_order_acquire);
upb_free(block_alloc, block);
block = next_block;
}
upb_free(a->block_alloc, block);
block = next;
a = next_arena;
}
}
void upb_Arena_Free(upb_Arena* a) {
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
}
// compare_exchange or fetch_sub are RMW operations, which are more
@ -5724,10 +5713,10 @@ retry:
return;
}
if (upb_Atomic_CompareExchangeStrongAcqRel(
if (upb_Atomic_CompareExchangeWeak(
&a->parent_or_count, &poc,
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) -
1))) {
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) - 1),
memory_order_release, memory_order_acquire)) {
// We were >1 and we decremented it successfully, so we are done.
return;
}
@ -5737,31 +5726,30 @@ retry:
goto retry;
}
bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func) {
cleanup_ent* ent;
uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);
if (!cleanups || _upb_ArenaHas(a) < sizeof(cleanup_ent)) {
if (!upb_Arena_Allocblock(a, 128)) return false; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= sizeof(cleanup_ent));
cleanups = upb_cleanup_pointer(a->cleanup_metadata);
static void _upb_Arena_DoFuseArenaLists(upb_Arena* r1, upb_Arena* r2) {
// Find the region for `r2`'s linked list.
upb_Arena* r1_tail = upb_Atomic_Load(&r1->tail, memory_order_relaxed);
while (true) {
upb_Arena* r1_next = upb_Atomic_Load(&r1_tail->next, memory_order_relaxed);
while (r1_next != NULL) {
// r1->tail was stale. This can happen, but tail should always converge
// on the true tail.
r1_tail = r1_next;
r1_next = upb_Atomic_Load(&r1_tail->next, memory_order_relaxed);
}
if (upb_Atomic_CompareExchangeStrong(&r1_tail->next, &r1_next, r2,
memory_order_relaxed,
memory_order_relaxed)) {
break;
}
}
a->head.end -= sizeof(cleanup_ent);
ent = (cleanup_ent*)a->head.end;
(*cleanups)++;
UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));
ent->cleanup = func;
ent->ud = ud;
return true;
upb_Arena* r2_tail = upb_Atomic_Load(&r2->tail, memory_order_relaxed);
upb_Atomic_Store(&r1->tail, r2_tail, memory_order_relaxed);
}
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// SAFE IN THE PRESENCE OF FUSE/FREE RACES BUT NOT IN THE
// PRESENCE OF FUSE/FUSE RACES!!!
//
static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
// `parent_or_count` has two distinct modes
// - parent pointer mode
// - refcount mode
@ -5769,72 +5757,85 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// In parent pointer mode, it may change what pointer it refers to in the
// tree, but it will always approach a root. Any operation that walks the
// tree to the root may collapse levels of the tree concurrently.
//
// In refcount mode, any free operation may lower the refcount.
//
// Only a fuse operation may increase the refcount.
// Only a fuse operation may switch `parent_or_count` from parent mode to
// refcount mode.
//
// Given that we do not allow fuse/fuse races, we may rely on the invariant
// that only refcounts can change once we have found the root. Because the
// threads doing the fuse must hold references, we can guarantee that no
// refcounts will reach zero concurrently.
upb_Arena* r1 = _upb_Arena_FindRoot(a1);
upb_Arena* r2 = _upb_Arena_FindRoot(a2);
if (r1 == r2) return true; // Already fused.
// Do not fuse initial blocks since we cannot lifetime extend them.
if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;
_upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
_upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
// Only allow fuse with a common allocator
if (r1->block_alloc != r2->block_alloc) return false;
if (r1.root == r2.root) return r1.root; // Already fused.
uintptr_t r1_poc = upb_Atomic_LoadAcquire(&r1->parent_or_count);
uintptr_t r2_poc = upb_Atomic_LoadAcquire(&r2->parent_or_count);
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r1_poc));
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
// Keep the tree shallow by joining the smaller tree to the larger.
if (_upb_Arena_RefCountFromTagged(r1_poc) <
_upb_Arena_RefCountFromTagged(r2_poc)) {
upb_Arena* tmp = r1;
// Avoid cycles by always fusing into the root with the lower address.
if ((uintptr_t)r1.root > (uintptr_t)r2.root) {
_upb_ArenaRoot tmp = r1;
r1 = r2;
r2 = tmp;
}
uintptr_t tmp_poc = r1_poc;
r1_poc = r2_poc;
r2_poc = tmp_poc;
// The moment we install `r1` as the parent for `r2` all racing frees may
// immediately begin decrementing `r1`'s refcount (including pending
// increments to that refcount and their frees!). We need to add `r2`'s refs
// now, so that `r1` can withstand any unrefs that come from r2.
//
// Note that while it is possible for `r2`'s refcount to increase
// asynchronously, we will not actually do the reparenting operation below
// unless `r2`'s refcount is unchanged from when we read it.
//
// Note that we may have done this previously, either to this node or a
// different node, during a previous and failed DoFuse() attempt. But we will
// not lose track of these refs because we always add them to our overall
// delta.
uintptr_t r2_untagged_count = r2.tagged_count & ~1;
uintptr_t with_r2_refs = r1.tagged_count + r2_untagged_count;
if (!upb_Atomic_CompareExchangeStrong(
&r1.root->parent_or_count, &r1.tagged_count, with_r2_refs,
memory_order_release, memory_order_acquire)) {
return NULL;
}
// r1 takes over r2's freelist; this must happen before we update
// refcounts, since the refcount carries the memory dependencies.
if (r2->freelist_tail) {
UPB_ASSERT(r2->freelist_tail->next == NULL);
r2->freelist_tail->next = r1->freelist;
r1->freelist = r2->freelist;
// Perform the actual fuse by removing the refs from `r2` and swapping in the
// parent pointer.
if (!upb_Atomic_CompareExchangeStrong(
&r2.root->parent_or_count, &r2.tagged_count,
_upb_Arena_TaggedFromPointer(r1.root), memory_order_release,
memory_order_acquire)) {
// We'll need to remove the excess refs we added to r1 previously.
*ref_delta += r2_untagged_count;
return NULL;
}
// The moment we install `r1` as the parent for `r2` all racing frees may
// immediately begin decrementing `r1`'s refcount. So we must install all the
// refcounts that we know about first to prevent a premature unref to zero.
uint32_t r2_refcount = _upb_Arena_RefCountFromTagged(r2_poc);
upb_Atomic_AddRelease(&r1->parent_or_count, ((uintptr_t)r2_refcount) << 1);
// When installing `r1` as the parent for `r2` racing frees may have changed
// the refcount for `r2` so we need to capture the old value to fix up `r1`'s
// refcount based on the delta from what we saw the first time.
r2_poc = upb_Atomic_ExchangeAcqRel(&r2->parent_or_count,
_upb_Arena_TaggedFromPointer(r1));
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
uint32_t delta_refcount = r2_refcount - _upb_Arena_RefCountFromTagged(r2_poc);
if (delta_refcount != 0) {
upb_Atomic_SubRelease(&r1->parent_or_count, ((uintptr_t)delta_refcount)
<< 1);
// Now that the fuse has been performed (and can no longer fail) we need to
// append `r2` to `r1`'s linked list.
_upb_Arena_DoFuseArenaLists(r1.root, r2.root);
return r1.root;
}
static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
if (_upb_Arena_IsTaggedPointer(poc)) return false;
uintptr_t with_refs = poc - ref_delta;
UPB_ASSERT(!_upb_Arena_IsTaggedPointer(with_refs));
return upb_Atomic_CompareExchangeStrong(&new_root->parent_or_count, &poc,
with_refs, memory_order_relaxed,
memory_order_relaxed);
}
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
if (a1 == a2) return true; // trivial fuse
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (upb_Arena_HasInitialBlock(a1) || upb_Arena_HasInitialBlock(a2)) {
return false;
}
// The number of refs we ultimately need to transfer to the new root.
uintptr_t ref_delta = 0;
while (true) {
upb_Arena* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
if (new_root != NULL && _upb_Arena_FixupRefs(new_root, ref_delta)) {
return true;
}
}
return true;
}
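To make the ref accounting concrete, a worked example of one _upb_Arena_DoFuse() attempt using the tagged encoding (refcount << 1) | 1; the numbers are illustrative:

// Suppose r1 has refcount 2 (tagged 5) and r2 has refcount 3 (tagged 7).
uintptr_t r1_tagged = (2u << 1) | 1;                       // 5
uintptr_t r2_tagged = (3u << 1) | 1;                       // 7
uintptr_t r2_untagged_count = r2_tagged & ~(uintptr_t)1;   // 6, i.e. 3 << 1
uintptr_t with_r2_refs = r1_tagged + r2_untagged_count;    // 11 -> refcount 5
// The first CAS installs with_r2_refs on r1. The second CAS turns r2 into a
// parent pointer; if that CAS loses a race, the 6 refs just added to r1 are
// recorded in *ref_delta and handed back later by _upb_Arena_FixupRefs().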
@ -5884,7 +5885,8 @@ upb_GetExtension_Status upb_MiniTable_GetOrPromoteExtension(
// Check unknown fields, if available promote.
int field_number = ext_table->field.number;
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(msg, field_number);
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(
msg, field_number, kUpb_WireFormat_DefaultDepthLimit);
if (result.status != kUpb_FindUnknown_Ok) {
return kUpb_GetExtension_NotPresent;
}
@ -5935,7 +5937,8 @@ upb_GetExtensionAsBytes_Status upb_MiniTable_GetExtensionAsBytes(
return kUpb_GetExtensionAsBytes_Ok;
}
int field_number = ext_table->field.number;
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(msg, field_number);
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(
msg, field_number, upb_DecodeOptions_GetMaxDepth(encode_options));
if (result.status != kUpb_FindUnknown_Ok) {
return kUpb_GetExtensionAsBytes_NotPresent;
}
@ -5954,8 +5957,8 @@ static upb_FindUnknownRet upb_FindUnknownRet_ParseError(void) {
}
upb_FindUnknownRet upb_MiniTable_FindUnknown(const upb_Message* msg,
uint32_t field_number) {
const int depth_limit = 100; // TODO: this should be a parameter
uint32_t field_number,
int depth_limit) {
size_t size;
upb_FindUnknownRet ret;
@ -5997,7 +6000,8 @@ upb_UnknownToMessageRet upb_MiniTable_PromoteUnknownToMessage(
upb_Message* message = NULL;
// Callers should check that the message is not already set before calling
// PromoteUnknownToMessage.
UPB_ASSERT(mini_table->subs[field->submsg_index].submsg == sub_mini_table);
UPB_ASSERT(upb_MiniTable_GetSubMessageTable(mini_table, field) ==
sub_mini_table);
bool is_oneof = _upb_MiniTableField_InOneOf(field);
if (!is_oneof || _upb_getoneofcase_field(msg, field) == field->number) {
UPB_ASSERT(upb_Message_GetMessage(msg, field, NULL) == NULL);
@ -6005,7 +6009,8 @@ upb_UnknownToMessageRet upb_MiniTable_PromoteUnknownToMessage(
upb_UnknownToMessageRet ret;
ret.status = kUpb_UnknownToMessage_Ok;
do {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
switch (unknown.status) {
case kUpb_FindUnknown_Ok: {
const char* unknown_data = unknown.ptr;
@ -6051,7 +6056,8 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMessageArray(
// Find all unknowns with given field number and parse.
upb_FindUnknownRet unknown;
do {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
if (unknown.status == kUpb_FindUnknown_Ok) {
upb_UnknownToMessageRet ret = upb_MiniTable_ParseUnknownMessage(
unknown.ptr, unknown.len, mini_table,
@ -6082,7 +6088,7 @@ upb_MapInsertStatus upb_Message_InsertMapEntry(upb_Map* map,
upb_Message* map_entry_message,
upb_Arena* arena) {
const upb_MiniTable* map_entry_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table->field_count == 2);
const upb_MiniTableField* map_entry_key_field =
@ -6107,7 +6113,7 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMap(
upb_Message* msg, const upb_MiniTable* mini_table,
const upb_MiniTableField* field, int decode_options, upb_Arena* arena) {
const upb_MiniTable* map_entry_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table->field_count == 2);
@ -6115,7 +6121,8 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMap(
// Find all unknowns with given field number and parse.
upb_FindUnknownRet unknown;
while (1) {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
if (unknown.status != kUpb_FindUnknown_Ok) break;
upb_UnknownToMessageRet ret = upb_MiniTable_ParseUnknownMessage(
unknown.ptr, unknown.len, map_entry_mini_table,
@ -6523,9 +6530,9 @@ static void upb_MiniTable_SetTypeAndSub(upb_MiniTableField* field,
}
if (upb_MiniTable_HasSub(field, msg_modifiers)) {
field->submsg_index = sub_count ? (*sub_count)++ : 0;
field->UPB_PRIVATE(submsg_index) = sub_count ? (*sub_count)++ : 0;
} else {
field->submsg_index = kUpb_NoSub;
field->UPB_PRIVATE(submsg_index) = kUpb_NoSub;
}
if (upb_MtDecoder_FieldIsPackable(field) &&
@ -7397,7 +7404,8 @@ bool upb_MiniTable_SetSubMessage(upb_MiniTable* table,
return false;
}
upb_MiniTableSub* table_sub = (void*)&table->subs[field->submsg_index];
upb_MiniTableSub* table_sub =
(void*)&table->subs[field->UPB_PRIVATE(submsg_index)];
table_sub->submsg = sub;
return true;
}
@ -7409,7 +7417,8 @@ bool upb_MiniTable_SetSubEnum(upb_MiniTable* table, upb_MiniTableField* field,
(uintptr_t)(table->fields + table->field_count));
UPB_ASSERT(sub);
upb_MiniTableSub* table_sub = (void*)&table->subs[field->submsg_index];
upb_MiniTableSub* table_sub =
(void*)&table->subs[field->UPB_PRIVATE(submsg_index)];
table_sub->subenum = sub;
return true;
}
@ -11812,7 +11821,7 @@ static void _upb_Decoder_Munge(int type, wireval* val) {
static upb_Message* _upb_Decoder_NewSubMessage(
upb_Decoder* d, const upb_MiniTableSub* subs,
const upb_MiniTableField* field) {
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
upb_Message* msg = _upb_Message_New(subl, &d->arena);
if (!msg) _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
@ -11851,7 +11860,7 @@ static const char* _upb_Decoder_DecodeSubMessage(
upb_Decoder* d, const char* ptr, upb_Message* submsg,
const upb_MiniTableSub* subs, const upb_MiniTableField* field, int size) {
int saved_delta = upb_EpsCopyInputStream_PushLimit(&d->input, ptr, size);
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
ptr = _upb_Decoder_RecurseSubMessage(d, ptr, submsg, subl, DECODE_NOGROUP);
upb_EpsCopyInputStream_PopLimit(&d->input, ptr, saved_delta);
@ -11882,7 +11891,7 @@ UPB_FORCEINLINE
static const char* _upb_Decoder_DecodeKnownGroup(
upb_Decoder* d, const char* ptr, upb_Message* submsg,
const upb_MiniTableSub* subs, const upb_MiniTableField* field) {
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
return _upb_Decoder_DecodeGroup(d, ptr, submsg, subl, field->number);
}
@ -11946,7 +11955,7 @@ static const char* _upb_Decoder_DecodeEnumArray(upb_Decoder* d, const char* ptr,
const upb_MiniTableSub* subs,
const upb_MiniTableField* field,
wireval* val) {
const upb_MiniTableEnum* e = subs[field->submsg_index].subenum;
const upb_MiniTableEnum* e = subs[field->UPB_PRIVATE(submsg_index)].subenum;
if (!_upb_Decoder_CheckEnum(d, ptr, msg, e, field, val)) return ptr;
void* mem = UPB_PTR_AT(_upb_array_ptr(arr), arr->size * 4, void);
arr->size++;
@ -12017,7 +12026,7 @@ static const char* _upb_Decoder_DecodeEnumPacked(
upb_Decoder* d, const char* ptr, upb_Message* msg, upb_Array* arr,
const upb_MiniTableSub* subs, const upb_MiniTableField* field,
wireval* val) {
const upb_MiniTableEnum* e = subs[field->submsg_index].subenum;
const upb_MiniTableEnum* e = subs[field->UPB_PRIVATE(submsg_index)].subenum;
int saved_limit = upb_EpsCopyInputStream_PushLimit(&d->input, ptr, val->size);
char* out = UPB_PTR_AT(_upb_array_ptr(arr), arr->size * 4, void);
while (!_upb_Decoder_IsDone(d, &ptr)) {
@ -12178,7 +12187,7 @@ static const char* _upb_Decoder_DecodeToMap(upb_Decoder* d, const char* ptr,
upb_Map* map = *map_p;
upb_MapEntry ent;
UPB_ASSERT(upb_MiniTableField_Type(field) == kUpb_FieldType_Message);
const upb_MiniTable* entry = subs[field->submsg_index].submsg;
const upb_MiniTable* entry = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(entry->field_count == 2);
UPB_ASSERT(!upb_IsRepeatedOrMap(&entry->fields[0]));
@ -12239,7 +12248,8 @@ static const char* _upb_Decoder_DecodeToSubMessage(
int type = field->descriptortype;
if (UPB_UNLIKELY(op == kUpb_DecodeOp_Enum) &&
!_upb_Decoder_CheckEnum(d, ptr, msg, subs[field->submsg_index].subenum,
!_upb_Decoder_CheckEnum(d, ptr, msg,
subs[field->UPB_PRIVATE(submsg_index)].subenum,
field, val)) {
return ptr;
}
@ -12555,7 +12565,7 @@ static void _upb_Decoder_CheckUnlinked(const upb_MiniTable* mt,
int* op) {
// If sub-message is not linked, treat as unknown.
if (field->mode & kUpb_LabelFlags_IsExtension) return;
const upb_MiniTableSub* sub = &mt->subs[field->submsg_index];
const upb_MiniTableSub* sub = &mt->subs[field->UPB_PRIVATE(submsg_index)];
if (!sub->submsg) *op = kUpb_DecodeOp_UnknownField;
}
@ -12875,9 +12885,10 @@ static upb_DecodeStatus upb_Decoder_Decode(upb_Decoder* const decoder,
UPB_ASSERT(decoder->status != kUpb_DecodeStatus_Ok);
}
arena->head.ptr = decoder->arena.head.ptr;
arena->head.end = decoder->arena.head.end;
arena->cleanup_metadata = decoder->arena.cleanup_metadata;
_upb_MemBlock* blocks =
upb_Atomic_Load(&decoder->arena.blocks, memory_order_relaxed);
arena->head = decoder->arena.head;
upb_Atomic_Store(&arena->blocks, blocks, memory_order_relaxed);
return decoder->status;
}
@ -12885,26 +12896,31 @@ upb_DecodeStatus upb_Decode(const char* buf, size_t size, void* msg,
const upb_MiniTable* l,
const upb_ExtensionRegistry* extreg, int options,
upb_Arena* arena) {
upb_Decoder state;
upb_Decoder decoder;
unsigned depth = (unsigned)options >> 16;
upb_EpsCopyInputStream_Init(&state.input, &buf, size,
upb_EpsCopyInputStream_Init(&decoder.input, &buf, size,
options & kUpb_DecodeOption_AliasString);
state.extreg = extreg;
state.unknown = NULL;
state.depth = depth ? depth : 64;
state.end_group = DECODE_NOGROUP;
state.options = (uint16_t)options;
state.missing_required = false;
state.arena.head = arena->head;
state.arena.last_size = arena->last_size;
state.arena.cleanup_metadata = arena->cleanup_metadata;
upb_Atomic_Init(&state.arena.parent_or_count,
_upb_Arena_TaggedFromPointer(arena));
state.status = kUpb_DecodeStatus_Ok;
decoder.extreg = extreg;
decoder.unknown = NULL;
decoder.depth = depth ? depth : kUpb_WireFormat_DefaultDepthLimit;
decoder.end_group = DECODE_NOGROUP;
decoder.options = (uint16_t)options;
decoder.missing_required = false;
decoder.status = kUpb_DecodeStatus_Ok;
// Violating the encapsulation of the arena for performance reasons.
// This is a temporary arena that we swap into and swap out of when we are
// done. The temporary arena only needs to be able to handle allocation,
// not fuse or free, so it does not need many of the members to be initialized
// (particularly parent_or_count).
_upb_MemBlock* blocks = upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
decoder.arena.head = arena->head;
decoder.arena.block_alloc = arena->block_alloc;
upb_Atomic_Init(&decoder.arena.blocks, blocks);
return upb_Decoder_Decode(&state, buf, msg, l, arena);
return upb_Decoder_Decode(&decoder, buf, msg, l, arena);
}
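For reference, a hedged sketch of how a caller drives the renamed decoder entry point, using the depth helpers added in the header below (MyMsg_mini_table is a placeholder for a generated MiniTable):

int options = upb_DecodeOptions_MaxDepth(32) | kUpb_DecodeOption_CheckRequired;
upb_DecodeStatus st =
    upb_Decode(buf, size, msg, MyMsg_mini_table, /*extreg=*/NULL, options, arena);
if (st != kUpb_DecodeStatus_Ok) {
  /* malformed input, missing required fields, depth limit exceeded, ... */
}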
#undef OP_FIXPCK_LG2
@ -14122,7 +14138,7 @@ static void encode_scalar(upb_encstate* e, const void* _field_mem,
case kUpb_FieldType_Group: {
size_t size;
void* submsg = *(void**)field_mem;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (submsg == NULL) {
return;
}
@ -14136,7 +14152,7 @@ static void encode_scalar(upb_encstate* e, const void* _field_mem,
case kUpb_FieldType_Message: {
size_t size;
void* submsg = *(void**)field_mem;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (submsg == NULL) {
return;
}
@ -14225,7 +14241,7 @@ static void encode_array(upb_encstate* e, const upb_Message* msg,
case kUpb_FieldType_Group: {
const void* const* start = _upb_array_constptr(arr);
const void* const* ptr = start + arr->size;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (--e->depth == 0) encode_err(e, kUpb_EncodeStatus_MaxDepthExceeded);
do {
size_t size;
@ -14240,7 +14256,7 @@ static void encode_array(upb_encstate* e, const upb_Message* msg,
case kUpb_FieldType_Message: {
const void* const* start = _upb_array_constptr(arr);
const void* const* ptr = start + arr->size;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (--e->depth == 0) encode_err(e, kUpb_EncodeStatus_MaxDepthExceeded);
do {
size_t size;
@ -14279,7 +14295,7 @@ static void encode_map(upb_encstate* e, const upb_Message* msg,
const upb_MiniTableSub* subs,
const upb_MiniTableField* f) {
const upb_Map* map = *UPB_PTR_AT(msg, f->offset, const upb_Map*);
const upb_MiniTable* layout = subs[f->submsg_index].submsg;
const upb_MiniTable* layout = subs[f->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(layout->field_count == 2);
if (map == NULL) return;
@ -14482,7 +14498,7 @@ upb_EncodeStatus upb_Encode(const void* msg, const upb_MiniTable* l,
e.buf = NULL;
e.limit = NULL;
e.ptr = NULL;
e.depth = depth ? depth : 64;
e.depth = depth ? depth : kUpb_WireFormat_DefaultDepthLimit;
e.options = options;
_upb_mapsorter_init(&e.sorter);
@ -14571,3 +14587,4 @@ const char* _upb_WireReader_SkipGroup(const char* ptr, uint32_t tag,
#undef UPB_IS_GOOGLE3
#undef UPB_ATOMIC
#undef UPB_USE_C11_ATOMICS
#undef UPB_PRIVATE

@ -167,14 +167,16 @@
#ifdef __GNUC__
#define UPB_USE_C11_ATOMICS
#define UPB_ATOMIC _Atomic
#define UPB_ATOMIC(T) _Atomic(T)
#else
#define UPB_ATOMIC
#define UPB_ATOMIC(T) T
#endif
/* UPB_PTRADD(ptr, ofs): add pointer while avoiding "NULL + 0" UB */
#define UPB_PTRADD(ptr, ofs) ((ofs) ? (ptr) + (ofs) : (ptr))
#define UPB_PRIVATE(x) x##_dont_copy_me__upb_internal_use_only
/* Configure whether fasttable is switched on or not. *************************/
#ifdef __has_attribute
@ -613,12 +615,14 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
typedef struct upb_Arena upb_Arena;
typedef void upb_CleanupFunc(void* context);
// LINT.IfChange(arena_head)
typedef struct {
char *ptr, *end;
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/arena.ts:arena_head)
#ifdef __cplusplus
extern "C" {
#endif
@ -629,8 +633,6 @@ extern "C" {
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_AddCleanup(upb_Arena* a, void* ud,
upb_CleanupFunc* func);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);
@ -1506,11 +1508,17 @@ size_t upb_Message_ExtensionCount(const upb_Message* msg);
// Must be last.
// LINT.IfChange(mini_table_field_layout)
struct upb_MiniTableField {
uint32_t number;
uint16_t offset;
int16_t presence; // If >0, hasbit_index. If <0, ~oneof_index
uint16_t submsg_index; // kUpb_NoSub if descriptortype != MESSAGE/GROUP/ENUM
// Indexes into `upb_MiniTable.subs`
// Will be set to `kUpb_NoSub` if `descriptortype` != MESSAGE/GROUP/ENUM
uint16_t UPB_PRIVATE(submsg_index);
uint8_t descriptortype;
// upb_FieldMode | upb_LabelFlags | (upb_FieldRep << kUpb_FieldRep_Shift)
@ -1554,6 +1562,8 @@ typedef enum {
#define kUpb_FieldRep_Shift 6
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/mini_table_field.ts:mini_table_field_layout)
UPB_INLINE upb_FieldRep
_upb_MiniTableField_GetRep(const upb_MiniTableField* field) {
return (upb_FieldRep)(field->mode >> kUpb_FieldRep_Shift);
@ -1645,6 +1655,7 @@ UPB_INLINE uint32_t _upb_getoneofcase_field(const upb_Message* msg,
}
// LINT.ThenChange(GoogleInternalName2)
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/presence.ts:presence_logic)
#ifdef __cplusplus
} /* extern "C" */
@ -1750,6 +1761,8 @@ typedef enum {
kUpb_ExtMode_IsMapEntry = 4,
} upb_ExtMode;
// LINT.IfChange(mini_table_layout)
// upb_MiniTable represents the memory layout of a given upb_MessageDef.
// The members are public so generated code can initialize them,
// but users MUST NOT directly read or write any of its members.
@ -1773,6 +1786,8 @@ struct upb_MiniTable {
_upb_FastTable_Entry fasttable[];
};
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/mini_table.ts:presence_logic)
// Map entries aren't actually stored for map fields, they are only used during
// parsing. For parsing, it helps a lot if all map entry messages have the same
// layout. The layout code in mini_table/decode.c will ensure that all map
@ -2292,14 +2307,27 @@ UPB_API_INLINE bool upb_MiniTableField_HasPresence(
}
}
// Returns the MiniTable for this message field. If the field is unlinked,
// returns NULL.
UPB_API_INLINE const upb_MiniTable* upb_MiniTable_GetSubMessageTable(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return mini_table->subs[field->submsg_index].submsg;
UPB_ASSERT(upb_MiniTableField_CType(field) == kUpb_CType_Message);
return mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
}
// Returns the MiniTableEnum for this enum field. If the field is unlinked,
// returns NULL.
UPB_API_INLINE const upb_MiniTableEnum* upb_MiniTable_GetSubEnumTable(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return mini_table->subs[field->submsg_index].subenum;
UPB_ASSERT(upb_MiniTableField_CType(field) == kUpb_CType_Enum);
return mini_table->subs[field->UPB_PRIVATE(submsg_index)].subenum;
}
// Returns true if this MiniTable field is linked to a MiniTable for the
// sub-message.
UPB_API_INLINE bool upb_MiniTable_MessageFieldIsLinked(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return upb_MiniTable_GetSubMessageTable(mini_table, field) != NULL;
}
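A hedged usage sketch of the three accessors above; note that upb_MiniTable_GetSubMessageTable() now asserts the field is a message field, so callers that may also see enum fields should branch on the C type first (mt and f are placeholders):

if (upb_MiniTableField_CType(f) == kUpb_CType_Message &&
    upb_MiniTable_MessageFieldIsLinked(mt, f)) {
  const upb_MiniTable* sub = upb_MiniTable_GetSubMessageTable(mt, f);
  /* safe to recurse into `sub` here */
}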
// If this field is in a oneof, returns the first field in the oneof.
@ -2377,6 +2405,8 @@ UPB_INLINE void _upb_Message_SetPresence(upb_Message* msg,
}
}
// LINT.IfChange(message_raw_fields)
UPB_INLINE bool _upb_MiniTable_ValueIsNonZero(const void* default_val,
const upb_MiniTableField* field) {
char zero[16] = {0};
@ -2415,6 +2445,8 @@ UPB_INLINE void _upb_MiniTable_CopyFieldData(void* to, const void* from,
UPB_UNREACHABLE();
}
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/message.ts:message_raw_fields)
UPB_INLINE size_t
_upb_MiniTable_ElementSizeLg2(const upb_MiniTableField* field) {
const unsigned char table[] = {
@ -2838,7 +2870,7 @@ UPB_API_INLINE void upb_Message_SetMessage(upb_Message* msg,
UPB_ASSUME(!upb_IsRepeatedOrMap(field));
UPB_ASSUME(_upb_MiniTableField_GetRep(field) ==
UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte));
UPB_ASSERT(mini_table->subs[field->submsg_index].submsg);
UPB_ASSERT(mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg);
_upb_Message_SetNonExtensionField(msg, field, &sub_message);
}
@ -2850,7 +2882,7 @@ UPB_API_INLINE upb_Message* upb_Message_GetOrCreateMutableMessage(
upb_Message* sub_message = *UPB_PTR_AT(msg, field->offset, upb_Message*);
if (!sub_message) {
const upb_MiniTable* sub_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(sub_mini_table);
sub_message = _upb_Message_New(sub_mini_table, arena);
*UPB_PTR_AT(msg, field->offset, upb_Message*) = sub_message;
@ -2862,7 +2894,7 @@ UPB_API_INLINE upb_Message* upb_Message_GetOrCreateMutableMessage(
UPB_API_INLINE const upb_Array* upb_Message_GetArray(
const upb_Message* msg, const upb_MiniTableField* field) {
_upb_MiniTableField_CheckIsArray(field);
const upb_Array* ret;
upb_Array* ret;
const upb_Array* default_val = NULL;
_upb_Message_GetNonExtensionField(msg, field, &default_val, &ret);
return ret;
@ -2916,7 +2948,7 @@ UPB_API_INLINE bool upb_MiniTableField_IsClosedEnum(
UPB_API_INLINE const upb_Map* upb_Message_GetMap(
const upb_Message* msg, const upb_MiniTableField* field) {
_upb_MiniTableField_CheckIsMap(field);
const upb_Map* ret;
upb_Map* ret;
const upb_Map* default_val = NULL;
_upb_Message_GetNonExtensionField(msg, field, &default_val, &ret);
return ret;
@ -2994,7 +3026,8 @@ typedef struct {
// Finds first occurrence of unknown data by tag id in message.
upb_FindUnknownRet upb_MiniTable_FindUnknown(const upb_Message* msg,
uint32_t field_number);
uint32_t field_number,
int depth_limit);
typedef enum {
kUpb_UnknownToMessage_Ok,
@ -3086,13 +3119,19 @@ enum {
kUpb_DecodeOption_CheckRequired = 2,
};
#define UPB_DECODE_MAXDEPTH(depth) ((depth) << 16)
UPB_INLINE uint32_t upb_DecodeOptions_MaxDepth(uint16_t depth) {
return (uint32_t)depth << 16;
}
UPB_INLINE uint16_t upb_DecodeOptions_GetMaxDepth(uint32_t options) {
return options >> 16;
}
// Enforce an upper bound on recursion depth.
UPB_INLINE int upb_Decode_LimitDepth(uint32_t decode_options, uint32_t limit) {
uint32_t max_depth = decode_options >> 16;
uint32_t max_depth = upb_DecodeOptions_GetMaxDepth(decode_options);
if (max_depth > limit) max_depth = limit;
return (max_depth << 16) | (decode_options & 0xffff);
return upb_DecodeOptions_MaxDepth(max_depth) | (decode_options & 0xffff);
}
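The helpers above just carry the depth in the upper 16 bits of the options word; a short worked example:

uint32_t opts = upb_DecodeOptions_MaxDepth(30) | kUpb_DecodeOption_CheckRequired;
// opts == (30 << 16) | 2
UPB_ASSERT(upb_DecodeOptions_GetMaxDepth(opts) == 30);
// Clamping: a requested depth of 500 is limited to 100 here.
UPB_ASSERT(upb_DecodeOptions_GetMaxDepth(
               upb_Decode_LimitDepth(upb_DecodeOptions_MaxDepth(500), 100)) == 100);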
typedef enum {
@ -8971,7 +9010,8 @@ UPB_API upb_MiniTableExtension* _upb_MiniTableExtension_Build(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_Build(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.submsg = NULL};
upb_MiniTableSub sub;
sub.submsg = NULL;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
@ -8979,7 +9019,8 @@ UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_Build(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildMessage(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_MiniTable* submsg, upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.submsg = submsg};
upb_MiniTableSub sub;
sub.submsg = submsg;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
@ -8987,7 +9028,8 @@ UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildMessage(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildEnum(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_MiniTableEnum* subenum, upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.subenum = subenum};
upb_MiniTableSub sub;
sub.subenum = subenum;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
@ -9994,28 +10036,31 @@ typedef struct _upb_MemBlock _upb_MemBlock;
struct upb_Arena {
_upb_ArenaHead head;
/* Stores cleanup metadata for this arena.
* - a pointer to the current cleanup counter.
* - a boolean indicating if there is an unowned initial block. */
uintptr_t cleanup_metadata;
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */
upb_alloc* block_alloc;
uint32_t last_size;
/* When multiple arenas are fused together, each arena points to a parent
* arena (root points to itself). The root tracks how many live arenas
* reference it.
*
* The low bit is tagged:
* 0: pointer to parent
* 1: count, left shifted by one
*/
UPB_ATOMIC uintptr_t parent_or_count;
/* Linked list of blocks to free/cleanup. */
_upb_MemBlock *freelist, *freelist_tail;
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
UPB_ATOMIC(upb_Arena*) next; // NULL at end of list.
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
UPB_ATOMIC(upb_Arena*) tail; // == self when no other list members.
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) blocks;
};
UPB_INLINE bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
@ -10026,13 +10071,13 @@ UPB_INLINE bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
UPB_INLINE uint32_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_INLINE uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uint32_t refcount) {
uintptr_t parent_or_count = (((uintptr_t)refcount) << 1) | 1;
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
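As a quick illustration of the widened helpers above (uintptr_t rather than uint32_t): a refcount n is stored as (n << 1) | 1, so the low bit distinguishes it from an even, aligned parent pointer:

uintptr_t tagged = _upb_Arena_TaggedFromRefcount(3);      // (3 << 1) | 1 == 7
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(tagged));          // low bit set
UPB_ASSERT(_upb_Arena_RefCountFromTagged(tagged) == 3);
// A upb_Arena* is at least 2-byte aligned, so its low bit is 0 and
// _upb_Arena_IsTaggedPointer() is simply the negation of the refcount check.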
@ -10048,6 +10093,21 @@ UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
return parent_or_count;
}
UPB_INLINE upb_alloc* upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
UPB_INLINE uintptr_t upb_Arena_MakeBlockAlloc(upb_alloc* alloc,
bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
UPB_INLINE bool upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
#endif /* UPB_MEM_ARENA_INTERNAL_H_ */
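The block_alloc word replaces the old cleanup_metadata bit: the allocator pointer and the "has initial block" flag now share one uintptr_t, relying on the allocator being at least 2-byte aligned. A hedged round-trip sketch (the stack-declared arena is for illustration only):

upb_Arena a;
a.block_alloc = upb_Arena_MakeBlockAlloc(&upb_alloc_global, /*has_initial=*/1);
UPB_ASSERT(upb_Arena_BlockAlloc(&a) == &upb_alloc_global);
UPB_ASSERT(upb_Arena_HasInitialBlock(&a));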
@ -10060,79 +10120,75 @@ UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
#include <stdatomic.h>
#include <stdbool.h>
UPB_INLINE void upb_Atomic_Init(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_init(addr, val);
}
UPB_INLINE uintptr_t upb_Atomic_LoadAcquire(_Atomic uintptr_t* addr) {
return atomic_load_explicit(addr, memory_order_acquire);
}
UPB_INLINE void upb_Atomic_StoreRelaxed(_Atomic uintptr_t* addr,
uintptr_t val) {
atomic_store_explicit(addr, val, memory_order_relaxed);
}
#define upb_Atomic_Init(addr, val) atomic_init(addr, val)
#define upb_Atomic_Load(addr, order) atomic_load_explicit(addr, order)
#define upb_Atomic_Store(addr, val, order) \
atomic_store_explicit(addr, val, order)
#define upb_Atomic_Add(addr, val, order) \
atomic_fetch_add_explicit(addr, val, order)
#define upb_Atomic_Sub(addr, val, order) \
  atomic_fetch_sub_explicit(addr, val, order)
#define upb_Atomic_CompareExchangeStrong(addr, expected, desired, \
success_order, failure_order) \
atomic_compare_exchange_strong_explicit(addr, expected, desired, \
success_order, failure_order)
#define upb_Atomic_CompareExchangeWeak(addr, expected, desired, success_order, \
failure_order) \
atomic_compare_exchange_weak_explicit(addr, expected, desired, \
success_order, failure_order)
UPB_INLINE void upb_Atomic_AddRelease(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_fetch_add_explicit(addr, val, memory_order_release);
}
#else // !UPB_USE_C11_ATOMICS
UPB_INLINE void upb_Atomic_SubRelease(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_fetch_sub_explicit(addr, val, memory_order_release);
}
#include <string.h>
UPB_INLINE uintptr_t upb_Atomic_ExchangeAcqRel(_Atomic uintptr_t* addr,
uintptr_t val) {
return atomic_exchange_explicit(addr, val, memory_order_acq_rel);
#define upb_Atomic_Init(addr, val) (*addr = val)
#define upb_Atomic_Load(addr, order) (*addr)
#define upb_Atomic_Store(addr, val, order) (*(addr) = val)
#define upb_Atomic_Add(addr, val, order) (*(addr) += val)
#define upb_Atomic_Sub(addr, val, order) (*(addr) -= val)
// `addr` and `expected` are logically double pointers.
UPB_INLINE bool _upb_NonAtomic_CompareExchangeStrongP(void* addr,
void* expected,
void* desired) {
if (memcmp(addr, expected, sizeof(desired)) == 0) {
memcpy(addr, &desired, sizeof(desired));
return true;
} else {
memcpy(expected, addr, sizeof(desired));
return false;
}
}
UPB_INLINE bool upb_Atomic_CompareExchangeStrongAcqRel(_Atomic uintptr_t* addr,
uintptr_t* expected,
uintptr_t desired) {
return atomic_compare_exchange_strong_explicit(
addr, expected, desired, memory_order_release, memory_order_acquire);
}
#define upb_Atomic_CompareExchangeStrong(addr, expected, desired, \
success_order, failure_order) \
_upb_NonAtomic_CompareExchangeStrongP((void*)addr, (void*)expected, \
(void*)desired)
#define upb_Atomic_CompareExchangeWeak(addr, expected, desired, success_order, \
failure_order) \
upb_Atomic_CompareExchangeStrong(addr, expected, desired, 0, 0)
#else // !UPB_USE_C11_ATOMICS
#endif
UPB_INLINE void upb_Atomic_Init(uintptr_t* addr, uintptr_t val) { *addr = val; }
UPB_INLINE uintptr_t upb_Atomic_LoadAcquire(uintptr_t* addr) { return *addr; }
#endif // UPB_PORT_ATOMIC_H_
UPB_INLINE void upb_Atomic_StoreRelaxed(uintptr_t* addr, uintptr_t val) {
*addr = val;
}
#ifndef UPB_WIRE_COMMON_H_
#define UPB_WIRE_COMMON_H_
UPB_INLINE void upb_Atomic_AddRelease(uintptr_t* addr, uintptr_t val) {
*addr += val;
}
// Must be last.
UPB_INLINE void upb_Atomic_SubRelease(uintptr_t* addr, uintptr_t val) {
*addr -= val;
}
#ifdef __cplusplus
extern "C" {
#endif
UPB_INLINE uintptr_t upb_Atomic_ExchangeAcqRel(uintptr_t* addr, uintptr_t val) {
uintptr_t ret = *addr;
*addr = val;
return ret;
}
#define kUpb_WireFormat_DefaultDepthLimit 100
UPB_INLINE bool upb_Atomic_CompareExchangeStrongAcqRel(uintptr_t* addr,
uintptr_t* expected,
uintptr_t desired) {
if (*addr == *expected) {
*addr = desired;
return true;
} else {
*expected = *addr;
return false;
}
#ifdef __cplusplus
}
#endif
#endif // UPB_PORT_ATOMIC_H_
#endif // UPB_WIRE_COMMON_H_
#ifndef UPB_WIRE_READER_H_
#define UPB_WIRE_READER_H_
@ -11262,3 +11318,4 @@ UPB_INLINE uint32_t _upb_FastDecoder_LoadTag(const char* ptr) {
#undef UPB_IS_GOOGLE3
#undef UPB_ATOMIC
#undef UPB_USE_C11_ATOMICS
#undef UPB_PRIVATE

@ -151,7 +151,7 @@ def protobuf_deps():
_github_archive(
name = "upb",
repo = "https://github.com/protocolbuffers/upb",
commit = "eda3f17ed398b0cd298e4c9f9ea58cfe7d69cab2",
sha256 = "f9b1f308e1b60b91b930301b43a8f27ba96e3c16fb527b581e693655313259f4",
commit = "e6dd5bfefe4829ff105b02e335520d4e35b888d2",
sha256 = "4ab6f7f7cfc2b95b5ab242ecb7cdcefe3c44c1267b56e6bd1bad4accae1030fd",
patches = ["@com_google_protobuf//build_defs:upb.patch"],
)

@ -980,7 +980,7 @@ static VALUE Message_decode(int argc, VALUE* argv, VALUE klass) {
rb_hash_lookup(hash_args, ID2SYM(rb_intern("recursion_limit")));
if (depth != Qnil && TYPE(depth) == T_FIXNUM) {
options |= UPB_DECODE_MAXDEPTH(FIX2INT(depth));
options |= upb_DecodeOptions_MaxDepth(FIX2INT(depth));
}
}
@ -1098,7 +1098,7 @@ static VALUE Message_encode(int argc, VALUE* argv, VALUE klass) {
rb_hash_lookup(hash_args, ID2SYM(rb_intern("recursion_limit")));
if (depth != Qnil && TYPE(depth) == T_FIXNUM) {
options |= UPB_DECODE_MAXDEPTH(FIX2INT(depth));
options |= upb_DecodeOptions_MaxDepth(FIX2INT(depth));
}
}

@ -168,14 +168,16 @@
#ifdef __GNUC__
#define UPB_USE_C11_ATOMICS
#define UPB_ATOMIC _Atomic
#define UPB_ATOMIC(T) _Atomic(T)
#else
#define UPB_ATOMIC
#define UPB_ATOMIC(T) T
#endif
/* UPB_PTRADD(ptr, ofs): add pointer while avoiding "NULL + 0" UB */
#define UPB_PTRADD(ptr, ofs) ((ofs) ? (ptr) + (ofs) : (ptr))
#define UPB_PRIVATE(x) x##_dont_copy_me__upb_internal_use_only
/* Configure whether fasttable is switched on or not. *************************/
#ifdef __has_attribute
@ -5128,39 +5130,28 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
// Must be last.
static uint32_t* upb_cleanup_pointer(uintptr_t cleanup_metadata) {
return (uint32_t*)(cleanup_metadata & ~0x1);
}
static bool upb_cleanup_has_initial_block(uintptr_t cleanup_metadata) {
return cleanup_metadata & 0x1;
}
static uintptr_t upb_cleanup_metadata(uint32_t* cleanup,
bool has_initial_block) {
return (uintptr_t)cleanup | has_initial_block;
}
struct _upb_MemBlock {
struct _upb_MemBlock* next;
// Atomic only for the benefit of SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) next;
uint32_t size;
uint32_t cleanups;
// Data follows.
};
typedef struct cleanup_ent {
upb_CleanupFunc* cleanup;
void* ud;
} cleanup_ent;
static const size_t memblock_reserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
typedef struct _upb_ArenaRoot {
upb_Arena* root;
uintptr_t tagged_count;
} _upb_ArenaRoot;
static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
uintptr_t next_poc = upb_Atomic_LoadAcquire(&next->parent_or_count);
UPB_ASSERT(a != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
if (_upb_Arena_IsTaggedPointer(next_poc)) {
// To keep complexity down, we lazily collapse levels of the tree. This
@ -5182,23 +5173,27 @@ static upb_Arena* _upb_Arena_FindRoot(upb_Arena* a) {
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
upb_Atomic_StoreRelaxed(&a->parent_or_count, next_poc);
UPB_ASSERT(a != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&a->parent_or_count, next_poc, memory_order_relaxed);
}
a = next;
poc = next_poc;
}
return a;
return (_upb_ArenaRoot){.root = a, .tagged_count = poc};
}
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
arena = _upb_Arena_FindRoot(arena);
arena = _upb_Arena_FindRoot(arena).root;
size_t memsize = 0;
_upb_MemBlock* block = arena->freelist;
while (block) {
memsize += sizeof(_upb_MemBlock) + block->size;
block = block->next;
while (arena != NULL) {
_upb_MemBlock* block =
upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
while (block != NULL) {
memsize += sizeof(_upb_MemBlock) + block->size;
block = upb_Atomic_Load(&block->next, memory_order_relaxed);
}
arena = upb_Atomic_Load(&arena->next, memory_order_relaxed);
}
return memsize;
@ -5207,58 +5202,55 @@ size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
}
static void upb_Arena_addblock(upb_Arena* a, upb_Arena* root, void* ptr,
size_t size) {
static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
/* The block is for arena |a|, but should appear in the freelist of |root|. */
block->next = root->freelist;
// Insert into linked list.
block->size = (uint32_t)size;
block->cleanups = 0;
root->freelist = block;
a->last_size = block->size;
if (!root->freelist_tail) root->freelist_tail = block;
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->cleanup_metadata = upb_cleanup_metadata(
&block->cleanups, upb_cleanup_has_initial_block(a->cleanup_metadata));
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
}
static bool upb_Arena_Allocblock(upb_Arena* a, size_t size) {
upb_Arena* root = _upb_Arena_FindRoot(a);
size_t block_size = UPB_MAX(size, a->last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(root->block_alloc, block_size);
static bool upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(upb_Arena_BlockAlloc(a), block_size);
if (!block) return false;
upb_Arena_addblock(a, root, block, block_size);
upb_Arena_AddBlock(a, block, block_size);
return true;
}
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
if (!upb_Arena_Allocblock(a, size)) return NULL; /* Out of memory. */
if (!upb_Arena_AllocBlock(a, size)) return NULL; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= size);
return upb_Arena_Malloc(a, size);
}
/* Public Arena API ***********************************************************/
static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
upb_Arena* a;
/* We need to malloc the initial block. */
n = first_block_overhead + 256;
char* mem;
size_t n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
return NULL;
}
@ -5266,13 +5258,13 @@ static upb_Arena* arena_initslow(void* mem, size_t n, upb_alloc* alloc) {
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a->block_alloc = alloc;
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
a->freelist = NULL;
a->freelist_tail = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, false);
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
upb_Arena_addblock(a, a, mem, n);
upb_Arena_AddBlock(a, mem, n);
return a;
}
@ -5293,51 +5285,48 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
return arena_initslow(mem, n, alloc);
return upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
a->block_alloc = alloc;
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
a->last_size = UPB_MAX(128, n);
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->freelist = NULL;
a->freelist_tail = NULL;
a->cleanup_metadata = upb_cleanup_metadata(NULL, true);
return a;
}
static void arena_dofree(upb_Arena* a) {
_upb_MemBlock* block = a->freelist;
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
while (block) {
/* Load first since we are deleting block. */
_upb_MemBlock* next = block->next;
if (block->cleanups > 0) {
cleanup_ent* end = UPB_PTR_AT(block, block->size, void);
cleanup_ent* ptr = end - block->cleanups;
for (; ptr < end; ptr++) {
ptr->cleanup(ptr->ud);
}
while (a != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
_upb_MemBlock* next_block =
upb_Atomic_Load(&block->next, memory_order_acquire);
upb_free(block_alloc, block);
block = next_block;
}
upb_free(a->block_alloc, block);
block = next;
a = next_arena;
}
}
void upb_Arena_Free(upb_Arena* a) {
uintptr_t poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_LoadAcquire(&a->parent_or_count);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
}
// compare_exchange or fetch_sub are RMW operations, which are more
@ -5348,10 +5337,10 @@ retry:
return;
}
if (upb_Atomic_CompareExchangeStrongAcqRel(
if (upb_Atomic_CompareExchangeWeak(
&a->parent_or_count, &poc,
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) -
1))) {
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) - 1),
memory_order_release, memory_order_acquire)) {
// We were >1 and we decremented it successfully, so we are done.
return;
}
@ -5361,31 +5350,30 @@ retry:
goto retry;
}
bool upb_Arena_AddCleanup(upb_Arena* a, void* ud, upb_CleanupFunc* func) {
cleanup_ent* ent;
uint32_t* cleanups = upb_cleanup_pointer(a->cleanup_metadata);
if (!cleanups || _upb_ArenaHas(a) < sizeof(cleanup_ent)) {
if (!upb_Arena_Allocblock(a, 128)) return false; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= sizeof(cleanup_ent));
cleanups = upb_cleanup_pointer(a->cleanup_metadata);
static void _upb_Arena_DoFuseArenaLists(upb_Arena* r1, upb_Arena* r2) {
// Find the region for `r2`'s linked list.
upb_Arena* r1_tail = upb_Atomic_Load(&r1->tail, memory_order_relaxed);
while (true) {
upb_Arena* r1_next = upb_Atomic_Load(&r1_tail->next, memory_order_relaxed);
while (r1_next != NULL) {
// r1->tail was stale. This can happen, but tail should always converge
// on the true tail.
r1_tail = r1_next;
r1_next = upb_Atomic_Load(&r1_tail->next, memory_order_relaxed);
}
if (upb_Atomic_CompareExchangeStrong(&r1_tail->next, &r1_next, r2,
memory_order_relaxed,
memory_order_relaxed)) {
break;
}
}
a->head.end -= sizeof(cleanup_ent);
ent = (cleanup_ent*)a->head.end;
(*cleanups)++;
UPB_UNPOISON_MEMORY_REGION(ent, sizeof(cleanup_ent));
ent->cleanup = func;
ent->ud = ud;
return true;
upb_Arena* r2_tail = upb_Atomic_Load(&r2->tail, memory_order_relaxed);
upb_Atomic_Store(&r1->tail, r2_tail, memory_order_relaxed);
}
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// SAFE IN THE PRESENCE OF FUSE/FREE RACES BUT NOT IN THE
// PRESENCE OF FUSE/FUSE RACES!!!
//
static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
// `parent_or_count` has two distinct modes:
// - parent pointer mode
// - refcount mode
@ -5393,72 +5381,85 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// In parent pointer mode, it may change what pointer it refers to in the
// tree, but it will always approach a root. Any operation that walks the
// tree to the root may collapse levels of the tree concurrently.
//
// In refcount mode, any free operation may lower the refcount.
//
// Only a fuse operation may increase the refcount.
// Only a fuse operation may switch `parent_or_count` from parent mode to
// refcount mode.
//
// Given that we do not allow fuse/fuse races, we may rely on the invariant
// that only refcounts can change once we have found the root. Because the
// threads doing the fuse must hold references, we can guarantee that no
// refcounts will reach zero concurrently.
upb_Arena* r1 = _upb_Arena_FindRoot(a1);
upb_Arena* r2 = _upb_Arena_FindRoot(a2);
if (r1 == r2) return true; // Already fused.
// Do not fuse initial blocks since we cannot lifetime extend them.
if (upb_cleanup_has_initial_block(r1->cleanup_metadata)) return false;
if (upb_cleanup_has_initial_block(r2->cleanup_metadata)) return false;
_upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
_upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
// Only allow fuse with a common allocator
if (r1->block_alloc != r2->block_alloc) return false;
if (r1.root == r2.root) return r1.root; // Already fused.
uintptr_t r1_poc = upb_Atomic_LoadAcquire(&r1->parent_or_count);
uintptr_t r2_poc = upb_Atomic_LoadAcquire(&r2->parent_or_count);
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r1_poc));
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
// Keep the tree shallow by joining the smaller tree to the larger.
if (_upb_Arena_RefCountFromTagged(r1_poc) <
_upb_Arena_RefCountFromTagged(r2_poc)) {
upb_Arena* tmp = r1;
// Avoid cycles by always fusing into the root with the lower address.
if ((uintptr_t)r1.root > (uintptr_t)r2.root) {
_upb_ArenaRoot tmp = r1;
r1 = r2;
r2 = tmp;
}
uintptr_t tmp_poc = r1_poc;
r1_poc = r2_poc;
r2_poc = tmp_poc;
// The moment we install `r1` as the parent for `r2` all racing frees may
// immediately begin decrementing `r1`'s refcount (including pending
// increments to that refcount and their frees!). We need to add `r2`'s refs
// now, so that `r1` can withstand any unrefs that come from r2.
//
// Note that while it is possible for `r2`'s refcount to increase
// asynchronously, we will not actually do the reparenting operation below
// unless `r2`'s refcount is unchanged from when we read it.
//
// Note that we may have done this previously, either to this node or a
// different node, during a previous and failed DoFuse() attempt. But we will
// not lose track of these refs because we always add them to our overall
// delta.
uintptr_t r2_untagged_count = r2.tagged_count & ~1;
uintptr_t with_r2_refs = r1.tagged_count + r2_untagged_count;
if (!upb_Atomic_CompareExchangeStrong(
&r1.root->parent_or_count, &r1.tagged_count, with_r2_refs,
memory_order_release, memory_order_acquire)) {
return NULL;
}
// r1 takes over r2's freelist; this must happen before we update
// refcounts since the refcount carries the memory dependencies.
if (r2->freelist_tail) {
UPB_ASSERT(r2->freelist_tail->next == NULL);
r2->freelist_tail->next = r1->freelist;
r1->freelist = r2->freelist;
// Perform the actual fuse by removing the refs from `r2` and swapping in the
// parent pointer.
if (!upb_Atomic_CompareExchangeStrong(
&r2.root->parent_or_count, &r2.tagged_count,
_upb_Arena_TaggedFromPointer(r1.root), memory_order_release,
memory_order_acquire)) {
// We'll need to remove the excess refs we added to r1 previously.
*ref_delta += r2_untagged_count;
return NULL;
}
// The moment we install `r1` as the parent for `r2` all racing frees may
// immediately begin decrementing `r1`'s refcount. So we must install all the
// refcounts that we know about first to prevent a premature unref to zero.
uint32_t r2_refcount = _upb_Arena_RefCountFromTagged(r2_poc);
upb_Atomic_AddRelease(&r1->parent_or_count, ((uintptr_t)r2_refcount) << 1);
// When installing `r1` as the parent for `r2` racing frees may have changed
// the refcount for `r2` so we need to capture the old value to fix up `r1`'s
// refcount based on the delta from what we saw the first time.
r2_poc = upb_Atomic_ExchangeAcqRel(&r2->parent_or_count,
_upb_Arena_TaggedFromPointer(r1));
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(r2_poc));
uint32_t delta_refcount = r2_refcount - _upb_Arena_RefCountFromTagged(r2_poc);
if (delta_refcount != 0) {
upb_Atomic_SubRelease(&r1->parent_or_count, ((uintptr_t)delta_refcount)
<< 1);
// Now that the fuse has been performed (and can no longer fail) we need to
// append `r2` to `r1`'s linked list.
_upb_Arena_DoFuseArenaLists(r1.root, r2.root);
return r1.root;
}
static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
if (_upb_Arena_IsTaggedPointer(poc)) return false;
uintptr_t with_refs = poc - ref_delta;
UPB_ASSERT(!_upb_Arena_IsTaggedPointer(with_refs));
return upb_Atomic_CompareExchangeStrong(&new_root->parent_or_count, &poc,
with_refs, memory_order_relaxed,
memory_order_relaxed);
}
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
if (a1 == a2) return true; // trivial fuse
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (upb_Arena_HasInitialBlock(a1) || upb_Arena_HasInitialBlock(a2)) {
return false;
}
// The number of refs we ultimately need to transfer to the new root.
uintptr_t ref_delta = 0;
while (true) {
upb_Arena* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
if (new_root != NULL && _upb_Arena_FixupRefs(new_root, ref_delta)) {
return true;
}
}
return true;
}
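An illustrative usage sketch (not part of this change): fusing two heap-backed arenas joins their lifetimes, so memory allocated from either one stays valid until the last member of the fused group is freed. Assumes the upb arena headers plus <assert.h> and <string.h>; the function name is hypothetical.
static void example_fuse_usage(void) {
  upb_Arena* a1 = upb_Arena_New();
  upb_Arena* a2 = upb_Arena_New();
  char* p = (char*)upb_Arena_Malloc(a1, 16);
  memcpy(p, "hello", 6);
  // Fusing succeeds here because neither arena was seeded with a
  // caller-owned initial block.
  bool ok = upb_Arena_Fuse(a1, a2);
  assert(ok);
  upb_Arena_Free(a1);  // p stays valid: a2 still keeps the fused group alive.
  assert(strcmp(p, "hello") == 0);
  upb_Arena_Free(a2);  // Last member freed; all blocks are released now.
}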
@ -5508,7 +5509,8 @@ upb_GetExtension_Status upb_MiniTable_GetOrPromoteExtension(
// Check unknown fields, if available promote.
int field_number = ext_table->field.number;
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(msg, field_number);
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(
msg, field_number, kUpb_WireFormat_DefaultDepthLimit);
if (result.status != kUpb_FindUnknown_Ok) {
return kUpb_GetExtension_NotPresent;
}
@ -5559,7 +5561,8 @@ upb_GetExtensionAsBytes_Status upb_MiniTable_GetExtensionAsBytes(
return kUpb_GetExtensionAsBytes_Ok;
}
int field_number = ext_table->field.number;
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(msg, field_number);
upb_FindUnknownRet result = upb_MiniTable_FindUnknown(
msg, field_number, upb_DecodeOptions_GetMaxDepth(encode_options));
if (result.status != kUpb_FindUnknown_Ok) {
return kUpb_GetExtensionAsBytes_NotPresent;
}
@ -5578,8 +5581,8 @@ static upb_FindUnknownRet upb_FindUnknownRet_ParseError(void) {
}
upb_FindUnknownRet upb_MiniTable_FindUnknown(const upb_Message* msg,
uint32_t field_number) {
const int depth_limit = 100; // TODO: this should be a parameter
uint32_t field_number,
int depth_limit) {
size_t size;
upb_FindUnknownRet ret;
@ -5621,7 +5624,8 @@ upb_UnknownToMessageRet upb_MiniTable_PromoteUnknownToMessage(
upb_Message* message = NULL;
// Callers should check that the message is not already set before calling
// PromoteUnknownToMessage.
UPB_ASSERT(mini_table->subs[field->submsg_index].submsg == sub_mini_table);
UPB_ASSERT(upb_MiniTable_GetSubMessageTable(mini_table, field) ==
sub_mini_table);
bool is_oneof = _upb_MiniTableField_InOneOf(field);
if (!is_oneof || _upb_getoneofcase_field(msg, field) == field->number) {
UPB_ASSERT(upb_Message_GetMessage(msg, field, NULL) == NULL);
@ -5629,7 +5633,8 @@ upb_UnknownToMessageRet upb_MiniTable_PromoteUnknownToMessage(
upb_UnknownToMessageRet ret;
ret.status = kUpb_UnknownToMessage_Ok;
do {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
switch (unknown.status) {
case kUpb_FindUnknown_Ok: {
const char* unknown_data = unknown.ptr;
@ -5675,7 +5680,8 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMessageArray(
// Find all unknowns with given field number and parse.
upb_FindUnknownRet unknown;
do {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
if (unknown.status == kUpb_FindUnknown_Ok) {
upb_UnknownToMessageRet ret = upb_MiniTable_ParseUnknownMessage(
unknown.ptr, unknown.len, mini_table,
@ -5706,7 +5712,7 @@ upb_MapInsertStatus upb_Message_InsertMapEntry(upb_Map* map,
upb_Message* map_entry_message,
upb_Arena* arena) {
const upb_MiniTable* map_entry_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table->field_count == 2);
const upb_MiniTableField* map_entry_key_field =
@ -5731,7 +5737,7 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMap(
upb_Message* msg, const upb_MiniTable* mini_table,
const upb_MiniTableField* field, int decode_options, upb_Arena* arena) {
const upb_MiniTable* map_entry_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table);
UPB_ASSERT(map_entry_mini_table->field_count == 2);
@ -5739,7 +5745,8 @@ upb_UnknownToMessage_Status upb_MiniTable_PromoteUnknownToMap(
// Find all unknowns with given field number and parse.
upb_FindUnknownRet unknown;
while (1) {
unknown = upb_MiniTable_FindUnknown(msg, field->number);
unknown = upb_MiniTable_FindUnknown(
msg, field->number, upb_DecodeOptions_GetMaxDepth(decode_options));
if (unknown.status != kUpb_FindUnknown_Ok) break;
upb_UnknownToMessageRet ret = upb_MiniTable_ParseUnknownMessage(
unknown.ptr, unknown.len, map_entry_mini_table,
@ -6147,9 +6154,9 @@ static void upb_MiniTable_SetTypeAndSub(upb_MiniTableField* field,
}
if (upb_MiniTable_HasSub(field, msg_modifiers)) {
field->submsg_index = sub_count ? (*sub_count)++ : 0;
field->UPB_PRIVATE(submsg_index) = sub_count ? (*sub_count)++ : 0;
} else {
field->submsg_index = kUpb_NoSub;
field->UPB_PRIVATE(submsg_index) = kUpb_NoSub;
}
if (upb_MtDecoder_FieldIsPackable(field) &&
@ -7021,7 +7028,8 @@ bool upb_MiniTable_SetSubMessage(upb_MiniTable* table,
return false;
}
upb_MiniTableSub* table_sub = (void*)&table->subs[field->submsg_index];
upb_MiniTableSub* table_sub =
(void*)&table->subs[field->UPB_PRIVATE(submsg_index)];
table_sub->submsg = sub;
return true;
}
@ -7033,7 +7041,8 @@ bool upb_MiniTable_SetSubEnum(upb_MiniTable* table, upb_MiniTableField* field,
(uintptr_t)(table->fields + table->field_count));
UPB_ASSERT(sub);
upb_MiniTableSub* table_sub = (void*)&table->subs[field->submsg_index];
upb_MiniTableSub* table_sub =
(void*)&table->subs[field->UPB_PRIVATE(submsg_index)];
table_sub->subenum = sub;
return true;
}
@ -11436,7 +11445,7 @@ static void _upb_Decoder_Munge(int type, wireval* val) {
static upb_Message* _upb_Decoder_NewSubMessage(
upb_Decoder* d, const upb_MiniTableSub* subs,
const upb_MiniTableField* field) {
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
upb_Message* msg = _upb_Message_New(subl, &d->arena);
if (!msg) _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
@ -11475,7 +11484,7 @@ static const char* _upb_Decoder_DecodeSubMessage(
upb_Decoder* d, const char* ptr, upb_Message* submsg,
const upb_MiniTableSub* subs, const upb_MiniTableField* field, int size) {
int saved_delta = upb_EpsCopyInputStream_PushLimit(&d->input, ptr, size);
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
ptr = _upb_Decoder_RecurseSubMessage(d, ptr, submsg, subl, DECODE_NOGROUP);
upb_EpsCopyInputStream_PopLimit(&d->input, ptr, saved_delta);
@ -11506,7 +11515,7 @@ UPB_FORCEINLINE
static const char* _upb_Decoder_DecodeKnownGroup(
upb_Decoder* d, const char* ptr, upb_Message* submsg,
const upb_MiniTableSub* subs, const upb_MiniTableField* field) {
const upb_MiniTable* subl = subs[field->submsg_index].submsg;
const upb_MiniTable* subl = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(subl);
return _upb_Decoder_DecodeGroup(d, ptr, submsg, subl, field->number);
}
@ -11570,7 +11579,7 @@ static const char* _upb_Decoder_DecodeEnumArray(upb_Decoder* d, const char* ptr,
const upb_MiniTableSub* subs,
const upb_MiniTableField* field,
wireval* val) {
const upb_MiniTableEnum* e = subs[field->submsg_index].subenum;
const upb_MiniTableEnum* e = subs[field->UPB_PRIVATE(submsg_index)].subenum;
if (!_upb_Decoder_CheckEnum(d, ptr, msg, e, field, val)) return ptr;
void* mem = UPB_PTR_AT(_upb_array_ptr(arr), arr->size * 4, void);
arr->size++;
@ -11641,7 +11650,7 @@ static const char* _upb_Decoder_DecodeEnumPacked(
upb_Decoder* d, const char* ptr, upb_Message* msg, upb_Array* arr,
const upb_MiniTableSub* subs, const upb_MiniTableField* field,
wireval* val) {
const upb_MiniTableEnum* e = subs[field->submsg_index].subenum;
const upb_MiniTableEnum* e = subs[field->UPB_PRIVATE(submsg_index)].subenum;
int saved_limit = upb_EpsCopyInputStream_PushLimit(&d->input, ptr, val->size);
char* out = UPB_PTR_AT(_upb_array_ptr(arr), arr->size * 4, void);
while (!_upb_Decoder_IsDone(d, &ptr)) {
@ -11802,7 +11811,7 @@ static const char* _upb_Decoder_DecodeToMap(upb_Decoder* d, const char* ptr,
upb_Map* map = *map_p;
upb_MapEntry ent;
UPB_ASSERT(upb_MiniTableField_Type(field) == kUpb_FieldType_Message);
const upb_MiniTable* entry = subs[field->submsg_index].submsg;
const upb_MiniTable* entry = subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(entry->field_count == 2);
UPB_ASSERT(!upb_IsRepeatedOrMap(&entry->fields[0]));
@ -11863,7 +11872,8 @@ static const char* _upb_Decoder_DecodeToSubMessage(
int type = field->descriptortype;
if (UPB_UNLIKELY(op == kUpb_DecodeOp_Enum) &&
!_upb_Decoder_CheckEnum(d, ptr, msg, subs[field->submsg_index].subenum,
!_upb_Decoder_CheckEnum(d, ptr, msg,
subs[field->UPB_PRIVATE(submsg_index)].subenum,
field, val)) {
return ptr;
}
@ -12179,7 +12189,7 @@ static void _upb_Decoder_CheckUnlinked(const upb_MiniTable* mt,
int* op) {
// If sub-message is not linked, treat as unknown.
if (field->mode & kUpb_LabelFlags_IsExtension) return;
const upb_MiniTableSub* sub = &mt->subs[field->submsg_index];
const upb_MiniTableSub* sub = &mt->subs[field->UPB_PRIVATE(submsg_index)];
if (!sub->submsg) *op = kUpb_DecodeOp_UnknownField;
}
@ -12499,9 +12509,10 @@ static upb_DecodeStatus upb_Decoder_Decode(upb_Decoder* const decoder,
UPB_ASSERT(decoder->status != kUpb_DecodeStatus_Ok);
}
arena->head.ptr = decoder->arena.head.ptr;
arena->head.end = decoder->arena.head.end;
arena->cleanup_metadata = decoder->arena.cleanup_metadata;
_upb_MemBlock* blocks =
upb_Atomic_Load(&decoder->arena.blocks, memory_order_relaxed);
arena->head = decoder->arena.head;
upb_Atomic_Store(&arena->blocks, blocks, memory_order_relaxed);
return decoder->status;
}
@ -12509,26 +12520,31 @@ upb_DecodeStatus upb_Decode(const char* buf, size_t size, void* msg,
const upb_MiniTable* l,
const upb_ExtensionRegistry* extreg, int options,
upb_Arena* arena) {
upb_Decoder state;
upb_Decoder decoder;
unsigned depth = (unsigned)options >> 16;
upb_EpsCopyInputStream_Init(&state.input, &buf, size,
upb_EpsCopyInputStream_Init(&decoder.input, &buf, size,
options & kUpb_DecodeOption_AliasString);
state.extreg = extreg;
state.unknown = NULL;
state.depth = depth ? depth : 64;
state.end_group = DECODE_NOGROUP;
state.options = (uint16_t)options;
state.missing_required = false;
state.arena.head = arena->head;
state.arena.last_size = arena->last_size;
state.arena.cleanup_metadata = arena->cleanup_metadata;
upb_Atomic_Init(&state.arena.parent_or_count,
_upb_Arena_TaggedFromPointer(arena));
state.status = kUpb_DecodeStatus_Ok;
decoder.extreg = extreg;
decoder.unknown = NULL;
decoder.depth = depth ? depth : kUpb_WireFormat_DefaultDepthLimit;
decoder.end_group = DECODE_NOGROUP;
decoder.options = (uint16_t)options;
decoder.missing_required = false;
decoder.status = kUpb_DecodeStatus_Ok;
// Violating the encapsulation of the arena for performance reasons.
// This is a temporary arena that we swap into and swap out of when we are
// done. The temporary arena only needs to be able to handle allocation,
// not fuse or free, so it does not need many of the members to be initialized
// (particularly parent_or_count).
_upb_MemBlock* blocks = upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
decoder.arena.head = arena->head;
decoder.arena.block_alloc = arena->block_alloc;
upb_Atomic_Init(&decoder.arena.blocks, blocks);
return upb_Decoder_Decode(&state, buf, msg, l, arena);
return upb_Decoder_Decode(&decoder, buf, msg, l, arena);
}
#undef OP_FIXPCK_LG2
@ -13746,7 +13762,7 @@ static void encode_scalar(upb_encstate* e, const void* _field_mem,
case kUpb_FieldType_Group: {
size_t size;
void* submsg = *(void**)field_mem;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (submsg == NULL) {
return;
}
@ -13760,7 +13776,7 @@ static void encode_scalar(upb_encstate* e, const void* _field_mem,
case kUpb_FieldType_Message: {
size_t size;
void* submsg = *(void**)field_mem;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (submsg == NULL) {
return;
}
@ -13849,7 +13865,7 @@ static void encode_array(upb_encstate* e, const upb_Message* msg,
case kUpb_FieldType_Group: {
const void* const* start = _upb_array_constptr(arr);
const void* const* ptr = start + arr->size;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (--e->depth == 0) encode_err(e, kUpb_EncodeStatus_MaxDepthExceeded);
do {
size_t size;
@ -13864,7 +13880,7 @@ static void encode_array(upb_encstate* e, const upb_Message* msg,
case kUpb_FieldType_Message: {
const void* const* start = _upb_array_constptr(arr);
const void* const* ptr = start + arr->size;
const upb_MiniTable* subm = subs[f->submsg_index].submsg;
const upb_MiniTable* subm = subs[f->UPB_PRIVATE(submsg_index)].submsg;
if (--e->depth == 0) encode_err(e, kUpb_EncodeStatus_MaxDepthExceeded);
do {
size_t size;
@ -13903,7 +13919,7 @@ static void encode_map(upb_encstate* e, const upb_Message* msg,
const upb_MiniTableSub* subs,
const upb_MiniTableField* f) {
const upb_Map* map = *UPB_PTR_AT(msg, f->offset, const upb_Map*);
const upb_MiniTable* layout = subs[f->submsg_index].submsg;
const upb_MiniTable* layout = subs[f->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(layout->field_count == 2);
if (map == NULL) return;
@ -14106,7 +14122,7 @@ upb_EncodeStatus upb_Encode(const void* msg, const upb_MiniTable* l,
e.buf = NULL;
e.limit = NULL;
e.ptr = NULL;
e.depth = depth ? depth : 64;
e.depth = depth ? depth : kUpb_WireFormat_DefaultDepthLimit;
e.options = options;
_upb_mapsorter_init(&e.sorter);
@ -14195,3 +14211,4 @@ const char* _upb_WireReader_SkipGroup(const char* ptr, uint32_t tag,
#undef UPB_IS_GOOGLE3
#undef UPB_ATOMIC
#undef UPB_USE_C11_ATOMICS
#undef UPB_PRIVATE

@ -169,14 +169,16 @@
#ifdef __GNUC__
#define UPB_USE_C11_ATOMICS
#define UPB_ATOMIC _Atomic
#define UPB_ATOMIC(T) _Atomic(T)
#else
#define UPB_ATOMIC
#define UPB_ATOMIC(T) T
#endif
/* UPB_PTRADD(ptr, ofs): add pointer while avoiding "NULL + 0" UB */
#define UPB_PTRADD(ptr, ofs) ((ofs) ? (ptr) + (ofs) : (ptr))
#define UPB_PRIVATE(x) x##_dont_copy_me__upb_internal_use_only
/* Configure whether fasttable is switched on or not. *************************/
#ifdef __has_attribute
@ -615,12 +617,14 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
typedef struct upb_Arena upb_Arena;
typedef void upb_CleanupFunc(void* context);
// LINT.IfChange(arena_head)
typedef struct {
char *ptr, *end;
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/arena.ts:arena_head)
#ifdef __cplusplus
extern "C" {
#endif
@ -631,8 +635,6 @@ extern "C" {
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_AddCleanup(upb_Arena* a, void* ud,
upb_CleanupFunc* func);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);
@ -1508,11 +1510,17 @@ size_t upb_Message_ExtensionCount(const upb_Message* msg);
// Must be last.
// LINT.IfChange(mini_table_field_layout)
struct upb_MiniTableField {
uint32_t number;
uint16_t offset;
int16_t presence; // If >0, hasbit_index. If <0, ~oneof_index
uint16_t submsg_index; // kUpb_NoSub if descriptortype != MESSAGE/GROUP/ENUM
// Indexes into `upb_MiniTable.subs`
// Will be set to `kUpb_NoSub` if `descriptortype` != MESSAGE/GROUP/ENUM
uint16_t UPB_PRIVATE(submsg_index);
uint8_t descriptortype;
// upb_FieldMode | upb_LabelFlags | (upb_FieldRep << kUpb_FieldRep_Shift)
@ -1556,6 +1564,8 @@ typedef enum {
#define kUpb_FieldRep_Shift 6
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/mini_table_field.ts:mini_table_field_layout)
UPB_INLINE upb_FieldRep
_upb_MiniTableField_GetRep(const upb_MiniTableField* field) {
return (upb_FieldRep)(field->mode >> kUpb_FieldRep_Shift);
@ -1647,6 +1657,7 @@ UPB_INLINE uint32_t _upb_getoneofcase_field(const upb_Message* msg,
}
// LINT.ThenChange(GoogleInternalName2)
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/presence.ts:presence_logic)
#ifdef __cplusplus
} /* extern "C" */
@ -1752,6 +1763,8 @@ typedef enum {
kUpb_ExtMode_IsMapEntry = 4,
} upb_ExtMode;
// LINT.IfChange(mini_table_layout)
// upb_MiniTable represents the memory layout of a given upb_MessageDef.
// The members are public so generated code can initialize them,
// but users MUST NOT directly read or write any of its members.
@ -1775,6 +1788,8 @@ struct upb_MiniTable {
_upb_FastTable_Entry fasttable[];
};
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/mini_table.ts:presence_logic)
// Map entries aren't actually stored for map fields, they are only used during
// parsing. For parsing, it helps a lot if all map entry messages have the same
// layout. The layout code in mini_table/decode.c will ensure that all map
@ -2294,14 +2309,27 @@ UPB_API_INLINE bool upb_MiniTableField_HasPresence(
}
}
// Returns the MiniTable for this message field. If the field is unlinked,
// returns NULL.
UPB_API_INLINE const upb_MiniTable* upb_MiniTable_GetSubMessageTable(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return mini_table->subs[field->submsg_index].submsg;
UPB_ASSERT(upb_MiniTableField_CType(field) == kUpb_CType_Message);
return mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
}
// Returns the MiniTableEnum for this enum field. If the field is unlinked,
// returns NULL.
UPB_API_INLINE const upb_MiniTableEnum* upb_MiniTable_GetSubEnumTable(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return mini_table->subs[field->submsg_index].subenum;
UPB_ASSERT(upb_MiniTableField_CType(field) == kUpb_CType_Enum);
return mini_table->subs[field->UPB_PRIVATE(submsg_index)].subenum;
}
// Returns true if this MiniTable field is linked to a MiniTable for the
// sub-message.
UPB_API_INLINE bool upb_MiniTable_MessageFieldIsLinked(
const upb_MiniTable* mini_table, const upb_MiniTableField* field) {
return upb_MiniTable_GetSubMessageTable(mini_table, field) != NULL;
}
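A hedged sketch of the intended check-before-descend pattern (the helper name is hypothetical, not from this change): a message field of a freshly built MiniTable may still be unlinked until upb_MiniTable_SetSubMessage() has run, so callers can guard on the link first.
static bool example_can_descend(const upb_MiniTable* mt,
                                const upb_MiniTableField* f) {
  if (upb_MiniTableField_CType(f) != kUpb_CType_Message) return false;
  return upb_MiniTable_MessageFieldIsLinked(mt, f);  // false while unlinked
}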
// If this field is in a oneof, returns the first field in the oneof.
@ -2379,6 +2407,8 @@ UPB_INLINE void _upb_Message_SetPresence(upb_Message* msg,
}
}
// LINT.IfChange(message_raw_fields)
UPB_INLINE bool _upb_MiniTable_ValueIsNonZero(const void* default_val,
const upb_MiniTableField* field) {
char zero[16] = {0};
@ -2417,6 +2447,8 @@ UPB_INLINE void _upb_MiniTable_CopyFieldData(void* to, const void* from,
UPB_UNREACHABLE();
}
// LINT.ThenChange(//depot/google3/third_party/upb/js/impl/upb_bits/message.ts:message_raw_fields)
UPB_INLINE size_t
_upb_MiniTable_ElementSizeLg2(const upb_MiniTableField* field) {
const unsigned char table[] = {
@ -2840,7 +2872,7 @@ UPB_API_INLINE void upb_Message_SetMessage(upb_Message* msg,
UPB_ASSUME(!upb_IsRepeatedOrMap(field));
UPB_ASSUME(_upb_MiniTableField_GetRep(field) ==
UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte));
UPB_ASSERT(mini_table->subs[field->submsg_index].submsg);
UPB_ASSERT(mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg);
_upb_Message_SetNonExtensionField(msg, field, &sub_message);
}
@ -2852,7 +2884,7 @@ UPB_API_INLINE upb_Message* upb_Message_GetOrCreateMutableMessage(
upb_Message* sub_message = *UPB_PTR_AT(msg, field->offset, upb_Message*);
if (!sub_message) {
const upb_MiniTable* sub_mini_table =
mini_table->subs[field->submsg_index].submsg;
mini_table->subs[field->UPB_PRIVATE(submsg_index)].submsg;
UPB_ASSERT(sub_mini_table);
sub_message = _upb_Message_New(sub_mini_table, arena);
*UPB_PTR_AT(msg, field->offset, upb_Message*) = sub_message;
@ -2864,7 +2896,7 @@ UPB_API_INLINE upb_Message* upb_Message_GetOrCreateMutableMessage(
UPB_API_INLINE const upb_Array* upb_Message_GetArray(
const upb_Message* msg, const upb_MiniTableField* field) {
_upb_MiniTableField_CheckIsArray(field);
const upb_Array* ret;
upb_Array* ret;
const upb_Array* default_val = NULL;
_upb_Message_GetNonExtensionField(msg, field, &default_val, &ret);
return ret;
@ -2918,7 +2950,7 @@ UPB_API_INLINE bool upb_MiniTableField_IsClosedEnum(
UPB_API_INLINE const upb_Map* upb_Message_GetMap(
const upb_Message* msg, const upb_MiniTableField* field) {
_upb_MiniTableField_CheckIsMap(field);
const upb_Map* ret;
upb_Map* ret;
const upb_Map* default_val = NULL;
_upb_Message_GetNonExtensionField(msg, field, &default_val, &ret);
return ret;
@ -2996,7 +3028,8 @@ typedef struct {
// Finds first occurrence of unknown data by tag id in message.
upb_FindUnknownRet upb_MiniTable_FindUnknown(const upb_Message* msg,
uint32_t field_number);
uint32_t field_number,
int depth_limit);
typedef enum {
kUpb_UnknownToMessage_Ok,
@ -3088,13 +3121,19 @@ enum {
kUpb_DecodeOption_CheckRequired = 2,
};
#define UPB_DECODE_MAXDEPTH(depth) ((depth) << 16)
UPB_INLINE uint32_t upb_DecodeOptions_MaxDepth(uint16_t depth) {
return (uint32_t)depth << 16;
}
UPB_INLINE uint16_t upb_DecodeOptions_GetMaxDepth(uint32_t options) {
return options >> 16;
}
// Enforce an upper bound on recursion depth.
UPB_INLINE int upb_Decode_LimitDepth(uint32_t decode_options, uint32_t limit) {
uint32_t max_depth = decode_options >> 16;
uint32_t max_depth = upb_DecodeOptions_GetMaxDepth(decode_options);
if (max_depth > limit) max_depth = limit;
return (max_depth << 16) | (decode_options & 0xffff);
return upb_DecodeOptions_MaxDepth(max_depth) | (decode_options & 0xffff);
}
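A hedged sketch of how these option helpers compose before a call to upb_Decode() (the wrapper name and its parameters are placeholders, not from this change):
static upb_DecodeStatus example_decode_capped(const char* buf, size_t size,
                                              void* msg,
                                              const upb_MiniTable* mini_table,
                                              upb_Arena* arena) {
  int options = kUpb_DecodeOption_AliasString | kUpb_DecodeOption_CheckRequired;
  options = upb_Decode_LimitDepth(options, 32);  // never recurse deeper than 32
  return upb_Decode(buf, size, msg, mini_table, /*extreg=*/NULL, options, arena);
}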
typedef enum {
@ -9639,28 +9678,31 @@ typedef struct _upb_MemBlock _upb_MemBlock;
struct upb_Arena {
_upb_ArenaHead head;
/* Stores cleanup metadata for this arena.
* - a pointer to the current cleanup counter.
* - a boolean indicating if there is an unowned initial block. */
uintptr_t cleanup_metadata;
/* Allocator to allocate arena blocks. We are responsible for freeing these
* when we are destroyed. */
upb_alloc* block_alloc;
uint32_t last_size;
/* When multiple arenas are fused together, each arena points to a parent
* arena (root points to itself). The root tracks how many live arenas
* reference it.
*
* The low bit is tagged:
* 0: pointer to parent
* 1: count, left shifted by one
*/
UPB_ATOMIC uintptr_t parent_or_count;
/* Linked list of blocks to free/cleanup. */
_upb_MemBlock *freelist, *freelist_tail;
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
UPB_ATOMIC(upb_Arena*) next; // NULL at end of list.
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
UPB_ATOMIC(upb_Arena*) tail; // == self when no other list members.
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) blocks;
};
UPB_INLINE bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
@ -9671,13 +9713,13 @@ UPB_INLINE bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
UPB_INLINE uint32_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_INLINE uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uint32_t refcount) {
uintptr_t parent_or_count = (((uintptr_t)refcount) << 1) | 1;
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
@ -9693,6 +9735,21 @@ UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
return parent_or_count;
}
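Illustrative only, not part of the diff: the low bit of parent_or_count selects between the two modes, so a small refcount and an arena pointer round-trip like this (the function name is hypothetical).
static void example_tagged_parent_or_count(void) {
  uintptr_t rc = _upb_Arena_TaggedFromRefcount(3);  // stored as (3 << 1) | 1
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(rc));
  UPB_ASSERT(_upb_Arena_RefCountFromTagged(rc) == 3);
  upb_Arena* a = upb_Arena_New();
  uintptr_t parent = _upb_Arena_TaggedFromPointer(a);  // low bit stays 0
  UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent));
  UPB_ASSERT(_upb_Arena_PointerFromTagged(parent) == a);
  upb_Arena_Free(a);
}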
UPB_INLINE upb_alloc* upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
UPB_INLINE uintptr_t upb_Arena_MakeBlockAlloc(upb_alloc* alloc,
bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
UPB_INLINE bool upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
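A small packing sketch (not part of the change): upb_alloc pointers are at least 2-byte aligned, which frees bit 0 to record whether the arena was created on a caller-owned initial block. The function name is hypothetical.
static void example_block_alloc_tag(void) {
  upb_alloc* alloc = &upb_alloc_global;
  uintptr_t tagged = upb_Arena_MakeBlockAlloc(alloc, /*has_initial=*/true);
  UPB_ASSERT((upb_alloc*)(tagged & ~(uintptr_t)1) == alloc);  // BlockAlloc() result
  UPB_ASSERT((tagged & 1) == 1);                              // HasInitialBlock() result
}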
#endif /* UPB_MEM_ARENA_INTERNAL_H_ */
@ -9705,79 +9762,75 @@ UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
#include <stdatomic.h>
#include <stdbool.h>
UPB_INLINE void upb_Atomic_Init(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_init(addr, val);
}
UPB_INLINE uintptr_t upb_Atomic_LoadAcquire(_Atomic uintptr_t* addr) {
return atomic_load_explicit(addr, memory_order_acquire);
}
UPB_INLINE void upb_Atomic_StoreRelaxed(_Atomic uintptr_t* addr,
uintptr_t val) {
atomic_store_explicit(addr, val, memory_order_relaxed);
}
#define upb_Atomic_Init(addr, val) atomic_init(addr, val)
#define upb_Atomic_Load(addr, order) atomic_load_explicit(addr, order)
#define upb_Atomic_Store(addr, val, order) \
atomic_store_explicit(addr, val, order)
#define upb_Atomic_Add(addr, val, order) \
atomic_fetch_add_explicit(addr, val, order)
#define upb_Atomic_Sub(addr, val, order) \
  atomic_fetch_sub_explicit(addr, val, order)
#define upb_Atomic_CompareExchangeStrong(addr, expected, desired, \
success_order, failure_order) \
atomic_compare_exchange_strong_explicit(addr, expected, desired, \
success_order, failure_order)
#define upb_Atomic_CompareExchangeWeak(addr, expected, desired, success_order, \
failure_order) \
atomic_compare_exchange_weak_explicit(addr, expected, desired, \
success_order, failure_order)
UPB_INLINE void upb_Atomic_AddRelease(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_fetch_add_explicit(addr, val, memory_order_release);
}
#else // !UPB_USE_C11_ATOMICS
UPB_INLINE void upb_Atomic_SubRelease(_Atomic uintptr_t* addr, uintptr_t val) {
atomic_fetch_sub_explicit(addr, val, memory_order_release);
}
#include <string.h>
UPB_INLINE uintptr_t upb_Atomic_ExchangeAcqRel(_Atomic uintptr_t* addr,
uintptr_t val) {
return atomic_exchange_explicit(addr, val, memory_order_acq_rel);
#define upb_Atomic_Init(addr, val) (*addr = val)
#define upb_Atomic_Load(addr, order) (*addr)
#define upb_Atomic_Store(addr, val, order) (*(addr) = val)
#define upb_Atomic_Add(addr, val, order) (*(addr) += val)
#define upb_Atomic_Sub(addr, val, order) (*(addr) -= val)
// `addr` and `expected` are logically double pointers.
UPB_INLINE bool _upb_NonAtomic_CompareExchangeStrongP(void* addr,
void* expected,
void* desired) {
if (memcmp(addr, expected, sizeof(desired)) == 0) {
memcpy(addr, &desired, sizeof(desired));
return true;
} else {
memcpy(expected, addr, sizeof(desired));
return false;
}
}
UPB_INLINE bool upb_Atomic_CompareExchangeStrongAcqRel(_Atomic uintptr_t* addr,
uintptr_t* expected,
uintptr_t desired) {
return atomic_compare_exchange_strong_explicit(
addr, expected, desired, memory_order_release, memory_order_acquire);
}
#define upb_Atomic_CompareExchangeStrong(addr, expected, desired, \
success_order, failure_order) \
_upb_NonAtomic_CompareExchangeStrongP((void*)addr, (void*)expected, \
(void*)desired)
#define upb_Atomic_CompareExchangeWeak(addr, expected, desired, success_order, \
failure_order) \
upb_Atomic_CompareExchangeStrong(addr, expected, desired, 0, 0)
#else // !UPB_USE_C11_ATOMICS
#endif
UPB_INLINE void upb_Atomic_Init(uintptr_t* addr, uintptr_t val) { *addr = val; }
UPB_INLINE uintptr_t upb_Atomic_LoadAcquire(uintptr_t* addr) { return *addr; }
#endif // UPB_PORT_ATOMIC_H_
UPB_INLINE void upb_Atomic_StoreRelaxed(uintptr_t* addr, uintptr_t val) {
*addr = val;
}
#ifndef UPB_WIRE_COMMON_H_
#define UPB_WIRE_COMMON_H_
UPB_INLINE void upb_Atomic_AddRelease(uintptr_t* addr, uintptr_t val) {
*addr += val;
}
// Must be last.
UPB_INLINE void upb_Atomic_SubRelease(uintptr_t* addr, uintptr_t val) {
*addr -= val;
}
#ifdef __cplusplus
extern "C" {
#endif
UPB_INLINE uintptr_t upb_Atomic_ExchangeAcqRel(uintptr_t* addr, uintptr_t val) {
uintptr_t ret = *addr;
*addr = val;
return ret;
}
#define kUpb_WireFormat_DefaultDepthLimit 100
UPB_INLINE bool upb_Atomic_CompareExchangeStrongAcqRel(uintptr_t* addr,
uintptr_t* expected,
uintptr_t desired) {
if (*addr == *expected) {
*addr = desired;
return true;
} else {
*expected = *addr;
return false;
}
#ifdef __cplusplus
}
#endif
#endif // UPB_PORT_ATOMIC_H_
#endif // UPB_WIRE_COMMON_H_
#ifndef UPB_WIRE_READER_H_
#define UPB_WIRE_READER_H_
@ -10182,7 +10235,8 @@ UPB_API upb_MiniTableExtension* _upb_MiniTableExtension_Build(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_Build(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.submsg = NULL};
upb_MiniTableSub sub;
sub.submsg = NULL;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
@ -10190,7 +10244,8 @@ UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_Build(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildMessage(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_MiniTable* submsg, upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.submsg = submsg};
upb_MiniTableSub sub;
sub.submsg = submsg;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
@ -10198,7 +10253,8 @@ UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildMessage(
UPB_API_INLINE upb_MiniTableExtension* upb_MiniTableExtension_BuildEnum(
const char* data, size_t len, const upb_MiniTable* extendee,
upb_MiniTableEnum* subenum, upb_Arena* arena, upb_Status* status) {
upb_MiniTableSub sub = {.subenum = subenum};
upb_MiniTableSub sub;
sub.subenum = subenum;
return _upb_MiniTableExtension_Build(
data, len, extendee, sub, kUpb_MiniTablePlatform_Native, arena, status);
}
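A hypothetical call site (mini_descriptor, extendee_table, and arena are placeholders, not from this change), assuming <string.h> for strlen:
static upb_MiniTableExtension* example_build_extension(
    const char* mini_descriptor, const upb_MiniTable* extendee_table,
    upb_Arena* arena) {
  upb_Status status;
  upb_Status_Clear(&status);
  upb_MiniTableExtension* ext = upb_MiniTableExtension_Build(
      mini_descriptor, strlen(mini_descriptor), extendee_table, arena, &status);
  // On failure ext is NULL and upb_Status_ErrorMessage(&status) explains why.
  return ext;
}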
@ -11099,3 +11155,4 @@ UPB_INLINE uint32_t _upb_FastDecoder_LoadTag(const char* ptr) {
#undef UPB_IS_GOOGLE3
#undef UPB_ATOMIC
#undef UPB_USE_C11_ATOMICS
#undef UPB_PRIVATE
