Auto-generate files after cl/592345066

pull/15154/head
Protobuf Team Bot 11 months ago
parent e3ed59102c
commit f75fe9e5f5
  1. php/ext/google/protobuf/php-upb.c (277)
  2. php/ext/google/protobuf/php-upb.h (192)
  3. ruby/ext/google/protobuf_c/ruby-upb.c (277)
  4. ruby/ext/google/protobuf_c/ruby-upb.h (192)

@ -5527,21 +5527,61 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
// Must be last.
struct _upb_MemBlock {
typedef struct upb_MemBlock {
// Atomic only for the benefit of SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) next;
UPB_ATOMIC(struct upb_MemBlock*) next;
uint32_t size;
// Data follows.
};
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
} upb_MemBlock;
typedef struct upb_ArenaInternal {
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
// == NULL at end of list.
UPB_ATOMIC(struct upb_ArenaInternal*) next;
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
// == self when no other list members.
UPB_ATOMIC(struct upb_ArenaInternal*) tail;
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(upb_MemBlock*) blocks;
} upb_ArenaInternal;
// All public + private state for an arena.
typedef struct {
upb_Arena head;
upb_ArenaInternal body;
} upb_ArenaState;
typedef struct {
upb_Arena* root;
upb_ArenaInternal* root;
uintptr_t tagged_count;
} upb_ArenaRoot;
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(upb_MemBlock), UPB_MALLOC_ALIGN);
// Extracts the (upb_ArenaInternal*) from a (upb_Arena*)
static upb_ArenaInternal* upb_Arena_Internal(const upb_Arena* a) {
return &((upb_ArenaState*)a)->body;
}
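A minimal sketch, assuming it sits in this translation unit where upb_ArenaState is defined, of the layout invariant the cast above relies on; upb_Arena_CheckLayout is a hypothetical helper, not part of the generated file.
// Hypothetical layout check: the cast in upb_Arena_Internal() is only sound
// because `head` is the first member of upb_ArenaState, so a public
// upb_Arena* and the enclosing upb_ArenaState* share the same address.
static void upb_Arena_CheckLayout(void) {
  UPB_ASSERT(offsetof(upb_ArenaState, head) == 0);
  UPB_ASSERT(offsetof(upb_ArenaState, body) >= sizeof(upb_Arena));
}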
static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
@ -5561,19 +5601,20 @@ static uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
return parent_or_count;
}
static upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
static upb_ArenaInternal* _upb_Arena_PointerFromTagged(
uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
return (upb_ArenaInternal*)parent_or_count;
}
static uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
static uintptr_t _upb_Arena_TaggedFromPointer(upb_ArenaInternal* ai) {
uintptr_t parent_or_count = (uintptr_t)ai;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
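A short worked sketch of the low-bit tagging described in the struct comment, exercising the tagging helpers from this file (the refcount/pointer predicates referenced here are defined in nearby unchanged code); demo_parent_or_count is hypothetical.
// Hypothetical demo of the two parent_or_count encodings:
//   refcount mode: value = (count << 1) | 1
//   parent mode:   value = (uintptr_t)parent, low bit 0 thanks to alignment
static void demo_parent_or_count(upb_ArenaInternal* parent) {
  uintptr_t rc = _upb_Arena_TaggedFromRefcount(3);  // encodes as 7
  UPB_ASSERT(_upb_Arena_IsTaggedRefcount(rc));
  UPB_ASSERT(_upb_Arena_RefCountFromTagged(rc) == 3);

  uintptr_t p = _upb_Arena_TaggedFromPointer(parent);
  UPB_ASSERT(_upb_Arena_IsTaggedPointer(p));
  UPB_ASSERT(_upb_Arena_PointerFromTagged(p) == parent);
}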
static upb_alloc* _upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
static upb_alloc* _upb_ArenaInternal_BlockAlloc(upb_ArenaInternal* ai) {
return (upb_alloc*)(ai->block_alloc & ~0x1);
}
static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
@ -5582,15 +5623,16 @@ static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
return alloc_uint | (has_initial ? 1 : 0);
}
static bool _upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
static bool _upb_ArenaInternal_HasInitialBlock(upb_ArenaInternal* ai) {
return ai->block_alloc & 0x1;
}
static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
UPB_ASSERT(a != next);
upb_ArenaInternal* next = _upb_Arena_PointerFromTagged(poc);
UPB_ASSERT(ai != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
@ -5614,64 +5656,67 @@ static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
UPB_ASSERT(a != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&a->parent_or_count, next_poc, memory_order_relaxed);
UPB_ASSERT(ai != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_relaxed);
}
a = next;
ai = next;
poc = next_poc;
}
return (upb_ArenaRoot){.root = a, .tagged_count = poc};
return (upb_ArenaRoot){.root = ai, .tagged_count = poc};
}
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
arena = _upb_Arena_FindRoot(arena).root;
upb_ArenaInternal* ai = _upb_Arena_FindRoot(arena).root;
size_t memsize = 0;
while (arena != NULL) {
_upb_MemBlock* block =
upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
while (ai != NULL) {
upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
while (block != NULL) {
memsize += sizeof(_upb_MemBlock) + block->size;
memsize += sizeof(upb_MemBlock) + block->size;
block = upb_Atomic_Load(&block->next, memory_order_relaxed);
}
arena = upb_Atomic_Load(&arena->next, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
}
return memsize;
}
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
ai = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
}
static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
upb_ArenaInternal* ai = upb_Arena_Internal(a);
upb_MemBlock* block = ptr;
// Insert into linked list.
block->size = (uint32_t)size;
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
upb_Atomic_Init(&block->next, ai->blocks);
upb_Atomic_Store(&ai->blocks, block, memory_order_release);
a->head.UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
UPB_POISON_MEMORY_REGION(a->head.UPB_PRIVATE(ptr),
a->head.UPB_PRIVATE(end) - a->head.UPB_PRIVATE(ptr));
UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
}
static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
if (!ai->block_alloc) return false;
upb_MemBlock* last_block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + kUpb_MemblockReserve;
_upb_MemBlock* block = upb_malloc(_upb_Arena_BlockAlloc(a), block_size);
upb_MemBlock* block =
upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);
if (!block) return false;
_upb_Arena_AddBlock(a, block, block_size);
@ -5685,8 +5730,9 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
}
static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + kUpb_MemblockReserve;
upb_Arena* a;
const size_t first_block_overhead =
sizeof(upb_ArenaState) + kUpb_MemblockReserve;
upb_ArenaState* a;
// We need to malloc the initial block.
char* mem;
@ -5695,22 +5741,23 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
return NULL;
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
n -= sizeof(upb_ArenaState);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->body.next, NULL);
upb_Atomic_Init(&a->body.tail, &a->body);
upb_Atomic_Init(&a->body.blocks, NULL);
_upb_Arena_AddBlock(a, mem, n);
_upb_Arena_AddBlock(&a->head, mem, n);
return a;
return &a->head;
}
upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Arena* a;
UPB_ASSERT(sizeof(void*) * UPB_ARENA_SIZE_HACK >= sizeof(upb_ArenaState));
upb_ArenaState* a;
if (n) {
/* Align initial pointer up so that we return properly-aligned pointers. */
@ -5722,63 +5769,65 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
/* Round block size down to alignof(*a) since we will allocate the arena
* itself at the end. */
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_ArenaState));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
if (UPB_UNLIKELY(n < sizeof(upb_ArenaState))) {
return _upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->body.next, NULL);
upb_Atomic_Init(&a->body.tail, &a->body);
upb_Atomic_Init(&a->body.blocks, NULL);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), char);
return a;
return &a->head;
}
static void _upb_Arena_DoFree(upb_Arena* a) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(ai->parent_or_count) == 1);
while (a != NULL) {
while (ai != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = _upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
upb_ArenaInternal* next_arena =
(upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
_upb_MemBlock* next_block =
upb_MemBlock* next_block =
upb_Atomic_Load(&block->next, memory_order_acquire);
upb_free(block_alloc, block);
block = next_block;
}
a = next_arena;
ai = next_arena;
}
}
void upb_Arena_Free(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
ai = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
// compare_exchange or fetch_sub are RMW operations, which are more
// expensive than direct loads. As an optimization, we only do RMW ops
// when we need to update things for other threads to see.
if (poc == _upb_Arena_TaggedFromRefcount(1)) {
_upb_Arena_DoFree(a);
_upb_Arena_DoFree(ai);
return;
}
if (upb_Atomic_CompareExchangeWeak(
&a->parent_or_count, &poc,
&ai->parent_or_count, &poc,
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) - 1),
memory_order_release, memory_order_acquire)) {
// We were >1 and we decremented it successfully, so we are done.
@ -5790,12 +5839,14 @@ retry:
goto retry;
}
static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Arena* child) {
upb_Arena* parent_tail = upb_Atomic_Load(&parent->tail, memory_order_relaxed);
static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
upb_ArenaInternal* child) {
upb_ArenaInternal* parent_tail =
upb_Atomic_Load(&parent->tail, memory_order_relaxed);
do {
// Our tail might be stale, but it will always converge to the true tail.
upb_Arena* parent_tail_next =
upb_ArenaInternal* parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
while (parent_tail_next != NULL) {
parent_tail = parent_tail_next;
@ -5803,7 +5854,7 @@ static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
}
upb_Arena* displaced =
upb_ArenaInternal* displaced =
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_relaxed);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_relaxed);
@ -5815,8 +5866,8 @@ static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_relaxed);
}
static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
static upb_ArenaInternal* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
// `parent_or_count` has two distinct modes
// - parent pointer mode
// - refcount mode
@ -5874,7 +5925,8 @@ static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
return r1.root;
}
static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
static bool _upb_Arena_FixupRefs(upb_ArenaInternal* new_root,
uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
@ -5889,28 +5941,33 @@ static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
if (a1 == a2) return true; // trivial fuse
upb_ArenaInternal* ai1 = upb_Arena_Internal(a1);
upb_ArenaInternal* ai2 = upb_Arena_Internal(a2);
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (_upb_Arena_HasInitialBlock(a1) || _upb_Arena_HasInitialBlock(a2)) {
if (_upb_ArenaInternal_HasInitialBlock(ai1) ||
_upb_ArenaInternal_HasInitialBlock(ai2)) {
return false;
}
// The number of refs we ultimately need to transfer to the new root.
uintptr_t ref_delta = 0;
while (true) {
upb_Arena* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
upb_ArenaInternal* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
if (new_root != NULL && _upb_Arena_FixupRefs(new_root, ref_delta)) {
return true;
}
}
}
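A hedged usage sketch of the fuse semantics implemented above, built only from public calls that appear in this commit (upb_Arena_New, upb_Arena_Malloc, upb_Arena_Fuse, upb_Arena_Free); demo_fuse and its literal sizes are illustrative, not part of the generated file.
// Hypothetical demo: after a successful fuse the two handles share one
// lifetime, so memory from either arena stays valid until both are freed.
static void demo_fuse(void) {
  upb_Arena* a = upb_Arena_New();
  upb_Arena* b = upb_Arena_New();
  char* p = (char*)upb_Arena_Malloc(a, 64);
  if (upb_Arena_Fuse(a, b)) {
    upb_Arena_Free(a);  // p remains valid: b still references the fused root.
    p[0] = 1;
    upb_Arena_Free(b);  // last reference gone; all blocks are released.
  } else {
    // Fuse only fails when an arena was built on a caller-provided initial
    // block, which cannot be lifetime-extended.
    upb_Arena_Free(a);
    upb_Arena_Free(b);
  }
}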
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner) {
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
if (_upb_ArenaInternal_HasInitialBlock(ai)) return false;
upb_ArenaRoot r;
if (_upb_Arena_HasInitialBlock(arena)) return false;
retry:
r = _upb_Arena_FindRoot(arena);
r = _upb_Arena_FindRoot(a);
if (upb_Atomic_CompareExchangeWeak(
&r.root->parent_or_count, &r.tagged_count,
_upb_Arena_TaggedFromRefcount(
@ -5923,8 +5980,25 @@ retry:
goto retry;
}
void upb_Arena_DecRefFor(upb_Arena* arena, const void* owner) {
upb_Arena_Free(arena);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner) { upb_Arena_Free(a); }
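A small hypothetical sketch of the owner-based reference counting exposed by these two functions; the owner pointer is purely advisory here, and upb_Arena_DecRefFor simply forwards to upb_Arena_Free.
// Hypothetical demo: take an extra reference on behalf of `owner`, then drop
// it. IncRefFor returns false for arenas with a caller-provided initial block.
static void demo_incref(upb_Arena* a, const void* owner) {
  if (upb_Arena_IncRefFor(a, owner)) {
    // ...the arena now outlives this owner until the matching DecRefFor...
    upb_Arena_DecRefFor(a, owner);
  }
}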
void UPB_PRIVATE(_upb_Arena_SwapIn)(upb_Arena* des, const upb_Arena* src) {
upb_ArenaInternal* desi = upb_Arena_Internal(des);
upb_ArenaInternal* srci = upb_Arena_Internal(src);
*des = *src;
desi->block_alloc = srci->block_alloc;
upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
upb_Atomic_Init(&desi->blocks, blocks);
}
void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
upb_ArenaInternal* desi = upb_Arena_Internal(des);
upb_ArenaInternal* srci = upb_Arena_Internal(src);
*des = *src;
upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
upb_Atomic_Store(&desi->blocks, blocks, memory_order_relaxed);
}
@ -14027,10 +14101,8 @@ static upb_DecodeStatus upb_Decoder_Decode(upb_Decoder* const decoder,
UPB_ASSERT(decoder->status != kUpb_DecodeStatus_Ok);
}
_upb_MemBlock* blocks =
upb_Atomic_Load(&decoder->arena.blocks, memory_order_relaxed);
arena->head = decoder->arena.head;
upb_Atomic_Store(&arena->blocks, blocks, memory_order_relaxed);
UPB_PRIVATE(_upb_Arena_SwapOut)(arena, &decoder->arena);
return decoder->status;
}
@ -14057,10 +14129,7 @@ upb_DecodeStatus upb_Decode(const char* buf, size_t size, void* msg,
// done. The temporary arena only needs to be able to handle allocation,
// not fuse or free, so it does not need many of the members to be initialized
// (particularly parent_or_count).
_upb_MemBlock* blocks = upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
decoder.arena.head = arena->head;
decoder.arena.block_alloc = arena->block_alloc;
upb_Atomic_Init(&decoder.arena.blocks, blocks);
UPB_PRIVATE(_upb_Arena_SwapIn)(&decoder.arena, arena);
return upb_Decoder_Decode(&decoder, buf, msg, l, arena);
}
@ -14718,7 +14787,7 @@ UPB_FORCEINLINE
static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
int copy, char* data, size_t data_offset,
upb_StringView* dst) {
d->arena.head.UPB_PRIVATE(ptr) += copy;
d->arena.UPB_PRIVATE(ptr) += copy;
dst->data = data + data_offset;
UPB_UNPOISON_MEMORY_REGION(data, copy);
memcpy(data, ptr, copy);
@ -14750,7 +14819,7 @@ static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
ptr += tagbytes + 1; \
dst->size = size; \
\
buf = d->arena.head.UPB_PRIVATE(ptr); \
buf = d->arena.UPB_PRIVATE(ptr); \
arena_has = UPB_PRIVATE(_upb_ArenaHas)(&d->arena); \
common_has = UPB_MIN(arena_has, \
upb_EpsCopyInputStream_BytesAvailable(&d->input, ptr)); \
@ -14930,8 +14999,8 @@ upb_Message* decode_newmsg_ceil(upb_Decoder* d, const upb_MiniTable* m,
if (UPB_LIKELY(msg_ceil_bytes > 0 &&
UPB_PRIVATE(_upb_ArenaHas)(&d->arena) >= msg_ceil_bytes)) {
UPB_ASSERT(size <= (size_t)msg_ceil_bytes);
msg_data = d->arena.head.UPB_PRIVATE(ptr);
d->arena.head.UPB_PRIVATE(ptr) += size;
msg_data = d->arena.UPB_PRIVATE(ptr);
d->arena.UPB_PRIVATE(ptr) += size;
UPB_UNPOISON_MEMORY_REGION(msg_data, msg_ceil_bytes);
memset(msg_data, 0, msg_ceil_bytes);
UPB_POISON_MEMORY_REGION(msg_data + size, msg_ceil_bytes - size);

@ -555,7 +555,6 @@ UPB_INLINE bool upb_StringView_IsEqual(upb_StringView a, upb_StringView b) {
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef UPB_MEM_ALLOC_H_
@ -628,43 +627,48 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
#endif /* UPB_MEM_ALLOC_H_ */
#ifndef UPB_MEM_INTERNAL_ARENA_H_
#define UPB_MEM_INTERNAL_ARENA_H_
#include <stddef.h>
#include <stdint.h>
#include <string.h>
// Must be last.
typedef struct upb_Arena upb_Arena;
// This is QUITE an ugly hack, which specifies the number of pointers needed
// to equal (or exceed) the storage required for one upb_Arena.
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#define UPB_ARENA_SIZE_HACK 7
// LINT.IfChange(struct_definition)
typedef struct {
// LINT.IfChange(upb_Array)
struct upb_Arena {
char* UPB_ONLYBITS(ptr);
char* UPB_ONLYBITS(end);
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts:upb_Array)
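A sketch of the pattern UPB_ARENA_SIZE_HACK enables, mirroring the union that upb_Decoder gains later in this commit; InlineArenaSlot is a hypothetical name, and the UPB_ASSERT in upb_Arena_Init() is what keeps the constant large enough.
// Hypothetical illustration: embed storage for a full arena without seeing
// upb_ArenaState, then hand the `arena` view to the swap-in/swap-out helpers.
typedef union {
  upb_Arena arena;                     // public two-pointer view (ptr/end)
  void* storage[UPB_ARENA_SIZE_HACK];  // >= sizeof(upb_ArenaState) by assertion
} InlineArenaSlot;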
#ifdef __cplusplus
extern "C" {
#endif
// Creates an arena from the given initial block (if any -- n may be 0).
// Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
// is a fixed-size arena and cannot grow.
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
const struct upb_Arena* src);
void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
const struct upb_Arena* src);
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(upb_Arena* a) {
const _upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->UPB_ONLYBITS(end) - h->UPB_ONLYBITS(ptr));
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
}
UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Malloc)(struct upb_Arena* a,
size_t size) {
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(struct upb_Arena * a, size_t size);
size = UPB_ALIGN_MALLOC(size);
const size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
@ -672,52 +676,34 @@ UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
}
// We have enough space to do a fast malloc.
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
void* ret = h->UPB_ONLYBITS(ptr);
void* ret = a->UPB_ONLYBITS(ptr);
UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
UPB_UNPOISON_MEMORY_REGION(ret, size);
h->UPB_ONLYBITS(ptr) += span;
a->UPB_ONLYBITS(ptr) += span;
return ret;
}
// Shrinks the last alloc from arena.
// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize ==
h->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
h->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
size_t size) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Realloc)(struct upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
bool is_most_recent_alloc =
(uintptr_t)ptr + oldsize == (uintptr_t)h->UPB_ONLYBITS(ptr);
(uintptr_t)ptr + oldsize == (uintptr_t)a->UPB_ONLYBITS(ptr);
if (is_most_recent_alloc) {
ptrdiff_t diff = size - oldsize;
if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
h->UPB_ONLYBITS(ptr) += diff;
a->UPB_ONLYBITS(ptr) += diff;
return ptr;
}
} else if (size <= oldsize) {
return ptr;
}
void* ret = upb_Arena_Malloc(a, size);
void* ret = UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
if (ret && oldsize > 0) {
memcpy(ret, ptr, UPB_MIN(oldsize, size));
@ -726,10 +712,69 @@ UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
return ret;
}
UPB_INLINE void UPB_PRIVATE(_upb_Arena_ShrinkLast)(struct upb_Arena* a,
void* ptr, size_t oldsize,
size_t size) {
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize ==
a->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
a->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
// Must be last.
typedef struct upb_Arena upb_Arena;
#ifdef __cplusplus
extern "C" {
#endif
// Creates an arena from the given initial block (if any -- n may be 0).
// Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
// is a fixed-size arena and cannot grow.
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
UPB_API_INLINE upb_Arena* upb_Arena_New(void) {
return upb_Arena_Init(NULL, 0, &upb_alloc_global);
}
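A hedged usage sketch of the two creation paths documented above (growable via upb_alloc_global, fixed-size via a caller-provided initial block); demo_create_arenas and the buffer size are illustrative.
// Hypothetical demo: a growable arena vs. a fixed-size arena that lives
// entirely inside a caller-provided buffer (alloc == NULL means it cannot grow).
static void demo_create_arenas(void) {
  upb_Arena* growable = upb_Arena_New();  // backed by upb_alloc_global

  char buf[512];
  upb_Arena* fixed = upb_Arena_Init(buf, sizeof(buf), NULL);
  void* p = upb_Arena_Malloc(fixed, 64);  // served from buf, no heap traffic
  (void)p;

  upb_Arena_Free(fixed);    // nothing to release; buf belongs to the caller
  upb_Arena_Free(growable);
}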
UPB_API_INLINE void* upb_Arena_Malloc(struct upb_Arena* a, size_t size) {
return UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
}
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
size_t size) {
return UPB_PRIVATE(_upb_Arena_Realloc)(a, ptr, oldsize, size);
}
// Shrinks the last alloc from arena.
// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
return UPB_PRIVATE(_upb_Arena_ShrinkLast)(a, ptr, oldsize, size);
}
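A short hypothetical sketch of the REQUIRES contract stated above, using only the public wrappers from this header; demo_shrink_last and its sizes are illustrative.
// Hypothetical demo: shrink the most recent allocation, then grow it again in
// place via realloc while it is still the last allocation from this arena.
static void demo_shrink_last(upb_Arena* a) {
  char* buf = (char*)upb_Arena_Malloc(a, 128);  // last alloc from `a`
  if (!buf) return;
  // ...suppose only 40 bytes ended up being needed...
  upb_Arena_ShrinkLast(a, buf, 128, 40);           // return the unused tail
  buf = (char*)upb_Arena_Realloc(a, buf, 40, 64);  // may extend in place
  (void)buf;
}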
#ifdef __cplusplus
} /* extern "C" */
#endif
@ -12154,48 +12199,6 @@ double _upb_NoLocaleStrtod(const char *str, char **endptr);
#endif /* UPB_LEX_STRTOD_H_ */
#ifndef UPB_MEM_INTERNAL_ARENA_H_
#define UPB_MEM_INTERNAL_ARENA_H_
// Must be last.
typedef struct _upb_MemBlock _upb_MemBlock;
// LINT.IfChange(struct_definition)
struct upb_Arena {
_upb_ArenaHead head;
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
UPB_ATOMIC(upb_Arena*) next; // NULL at end of list.
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
UPB_ATOMIC(upb_Arena*) tail; // == self when no other list members.
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) blocks;
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
#ifndef UPB_PORT_ATOMIC_H_
#define UPB_PORT_ATOMIC_H_
@ -13364,7 +13367,10 @@ typedef struct upb_Decoder {
uint32_t end_group; // field number of END_GROUP tag, else DECODE_NOGROUP.
uint16_t options;
bool missing_required;
upb_Arena arena;
union {
upb_Arena arena;
void* foo[UPB_ARENA_SIZE_HACK];
};
upb_DecodeStatus status;
jmp_buf err;

@ -5041,21 +5041,61 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
// Must be last.
struct _upb_MemBlock {
typedef struct upb_MemBlock {
// Atomic only for the benefit of SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) next;
UPB_ATOMIC(struct upb_MemBlock*) next;
uint32_t size;
// Data follows.
};
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
} upb_MemBlock;
typedef struct upb_ArenaInternal {
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
// == NULL at end of list.
UPB_ATOMIC(struct upb_ArenaInternal*) next;
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
// == self when no other list members.
UPB_ATOMIC(struct upb_ArenaInternal*) tail;
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(upb_MemBlock*) blocks;
} upb_ArenaInternal;
// All public + private state for an arena.
typedef struct {
upb_Arena head;
upb_ArenaInternal body;
} upb_ArenaState;
typedef struct {
upb_Arena* root;
upb_ArenaInternal* root;
uintptr_t tagged_count;
} upb_ArenaRoot;
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(upb_MemBlock), UPB_MALLOC_ALIGN);
// Extracts the (upb_ArenaInternal*) from a (upb_Arena*)
static upb_ArenaInternal* upb_Arena_Internal(const upb_Arena* a) {
return &((upb_ArenaState*)a)->body;
}
static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
@ -5075,19 +5115,20 @@ static uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
return parent_or_count;
}
static upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
static upb_ArenaInternal* _upb_Arena_PointerFromTagged(
uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
return (upb_ArenaInternal*)parent_or_count;
}
static uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
static uintptr_t _upb_Arena_TaggedFromPointer(upb_ArenaInternal* ai) {
uintptr_t parent_or_count = (uintptr_t)ai;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
static upb_alloc* _upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
static upb_alloc* _upb_ArenaInternal_BlockAlloc(upb_ArenaInternal* ai) {
return (upb_alloc*)(ai->block_alloc & ~0x1);
}
static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
@ -5096,15 +5137,16 @@ static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
return alloc_uint | (has_initial ? 1 : 0);
}
static bool _upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
static bool _upb_ArenaInternal_HasInitialBlock(upb_ArenaInternal* ai) {
return ai->block_alloc & 0x1;
}
static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
UPB_ASSERT(a != next);
upb_ArenaInternal* next = _upb_Arena_PointerFromTagged(poc);
UPB_ASSERT(ai != next);
uintptr_t next_poc =
upb_Atomic_Load(&next->parent_or_count, memory_order_acquire);
@ -5128,64 +5170,67 @@ static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
// further away over time, but the path towards that root will continue to
// be valid and the creation of the path carries all the memory orderings
// required.
UPB_ASSERT(a != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&a->parent_or_count, next_poc, memory_order_relaxed);
UPB_ASSERT(ai != _upb_Arena_PointerFromTagged(next_poc));
upb_Atomic_Store(&ai->parent_or_count, next_poc, memory_order_relaxed);
}
a = next;
ai = next;
poc = next_poc;
}
return (upb_ArenaRoot){.root = a, .tagged_count = poc};
return (upb_ArenaRoot){.root = ai, .tagged_count = poc};
}
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
arena = _upb_Arena_FindRoot(arena).root;
upb_ArenaInternal* ai = _upb_Arena_FindRoot(arena).root;
size_t memsize = 0;
while (arena != NULL) {
_upb_MemBlock* block =
upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
while (ai != NULL) {
upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_relaxed);
while (block != NULL) {
memsize += sizeof(_upb_MemBlock) + block->size;
memsize += sizeof(upb_MemBlock) + block->size;
block = upb_Atomic_Load(&block->next, memory_order_relaxed);
}
arena = upb_Atomic_Load(&arena->next, memory_order_relaxed);
ai = upb_Atomic_Load(&ai->next, memory_order_relaxed);
}
return memsize;
}
uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
// These loads could probably be relaxed, but given that this is debug-only,
// it's not worth introducing a new variant for it.
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
ai = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
return _upb_Arena_RefCountFromTagged(poc);
}
static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
upb_ArenaInternal* ai = upb_Arena_Internal(a);
upb_MemBlock* block = ptr;
// Insert into linked list.
block->size = (uint32_t)size;
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
upb_Atomic_Init(&block->next, ai->blocks);
upb_Atomic_Store(&ai->blocks, block, memory_order_release);
a->head.UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
UPB_POISON_MEMORY_REGION(a->head.UPB_PRIVATE(ptr),
a->head.UPB_PRIVATE(end) - a->head.UPB_PRIVATE(ptr));
UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
}
static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
if (!ai->block_alloc) return false;
upb_MemBlock* last_block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + kUpb_MemblockReserve;
_upb_MemBlock* block = upb_malloc(_upb_Arena_BlockAlloc(a), block_size);
upb_MemBlock* block =
upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);
if (!block) return false;
_upb_Arena_AddBlock(a, block, block_size);
@ -5199,8 +5244,9 @@ void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
}
static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + kUpb_MemblockReserve;
upb_Arena* a;
const size_t first_block_overhead =
sizeof(upb_ArenaState) + kUpb_MemblockReserve;
upb_ArenaState* a;
// We need to malloc the initial block.
char* mem;
@ -5209,22 +5255,23 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
return NULL;
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
n -= sizeof(upb_ArenaState);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->body.next, NULL);
upb_Atomic_Init(&a->body.tail, &a->body);
upb_Atomic_Init(&a->body.blocks, NULL);
_upb_Arena_AddBlock(a, mem, n);
_upb_Arena_AddBlock(&a->head, mem, n);
return a;
return &a->head;
}
upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Arena* a;
UPB_ASSERT(sizeof(void*) * UPB_ARENA_SIZE_HACK >= sizeof(upb_ArenaState));
upb_ArenaState* a;
if (n) {
/* Align initial pointer up so that we return properly-aligned pointers. */
@ -5236,63 +5283,65 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
/* Round block size down to alignof(*a) since we will allocate the arena
* itself at the end. */
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_ArenaState));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
if (UPB_UNLIKELY(n < sizeof(upb_ArenaState))) {
return _upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->body.next, NULL);
upb_Atomic_Init(&a->body.tail, &a->body);
upb_Atomic_Init(&a->body.blocks, NULL);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), char);
return a;
return &a->head;
}
static void _upb_Arena_DoFree(upb_Arena* a) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(ai->parent_or_count) == 1);
while (a != NULL) {
while (ai != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = _upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
upb_ArenaInternal* next_arena =
(upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
upb_MemBlock* block = upb_Atomic_Load(&ai->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
_upb_MemBlock* next_block =
upb_MemBlock* next_block =
upb_Atomic_Load(&block->next, memory_order_acquire);
upb_free(block_alloc, block);
block = next_block;
}
a = next_arena;
ai = next_arena;
}
}
void upb_Arena_Free(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
upb_ArenaInternal* ai = upb_Arena_Internal(a);
uintptr_t poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
retry:
while (_upb_Arena_IsTaggedPointer(poc)) {
a = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
ai = _upb_Arena_PointerFromTagged(poc);
poc = upb_Atomic_Load(&ai->parent_or_count, memory_order_acquire);
}
// compare_exchange or fetch_sub are RMW operations, which are more
// expensive than direct loads. As an optimization, we only do RMW ops
// when we need to update things for other threads to see.
if (poc == _upb_Arena_TaggedFromRefcount(1)) {
_upb_Arena_DoFree(a);
_upb_Arena_DoFree(ai);
return;
}
if (upb_Atomic_CompareExchangeWeak(
&a->parent_or_count, &poc,
&ai->parent_or_count, &poc,
_upb_Arena_TaggedFromRefcount(_upb_Arena_RefCountFromTagged(poc) - 1),
memory_order_release, memory_order_acquire)) {
// We were >1 and we decremented it successfully, so we are done.
@ -5304,12 +5353,14 @@ retry:
goto retry;
}
static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Arena* child) {
upb_Arena* parent_tail = upb_Atomic_Load(&parent->tail, memory_order_relaxed);
static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
upb_ArenaInternal* child) {
upb_ArenaInternal* parent_tail =
upb_Atomic_Load(&parent->tail, memory_order_relaxed);
do {
// Our tail might be stale, but it will always converge to the true tail.
upb_Arena* parent_tail_next =
upb_ArenaInternal* parent_tail_next =
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
while (parent_tail_next != NULL) {
parent_tail = parent_tail_next;
@ -5317,7 +5368,7 @@ static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Atomic_Load(&parent_tail->next, memory_order_relaxed);
}
upb_Arena* displaced =
upb_ArenaInternal* displaced =
upb_Atomic_Exchange(&parent_tail->next, child, memory_order_relaxed);
parent_tail = upb_Atomic_Load(&child->tail, memory_order_relaxed);
@ -5329,8 +5380,8 @@ static void _upb_Arena_DoFuseArenaLists(upb_Arena* const parent,
upb_Atomic_Store(&parent->tail, parent_tail, memory_order_relaxed);
}
static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
static upb_ArenaInternal* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
uintptr_t* ref_delta) {
// `parent_or_count` has two distinct modes
// - parent pointer mode
// - refcount mode
@ -5388,7 +5439,8 @@ static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
return r1.root;
}
static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
static bool _upb_Arena_FixupRefs(upb_ArenaInternal* new_root,
uintptr_t ref_delta) {
if (ref_delta == 0) return true; // No fixup required.
uintptr_t poc =
upb_Atomic_Load(&new_root->parent_or_count, memory_order_relaxed);
@ -5403,28 +5455,33 @@ static bool _upb_Arena_FixupRefs(upb_Arena* new_root, uintptr_t ref_delta) {
bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
if (a1 == a2) return true; // trivial fuse
upb_ArenaInternal* ai1 = upb_Arena_Internal(a1);
upb_ArenaInternal* ai2 = upb_Arena_Internal(a2);
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (_upb_Arena_HasInitialBlock(a1) || _upb_Arena_HasInitialBlock(a2)) {
if (_upb_ArenaInternal_HasInitialBlock(ai1) ||
_upb_ArenaInternal_HasInitialBlock(ai2)) {
return false;
}
// The number of refs we ultimately need to transfer to the new root.
uintptr_t ref_delta = 0;
while (true) {
upb_Arena* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
upb_ArenaInternal* new_root = _upb_Arena_DoFuse(a1, a2, &ref_delta);
if (new_root != NULL && _upb_Arena_FixupRefs(new_root, ref_delta)) {
return true;
}
}
}
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner) {
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner) {
upb_ArenaInternal* ai = upb_Arena_Internal(a);
if (_upb_ArenaInternal_HasInitialBlock(ai)) return false;
upb_ArenaRoot r;
if (_upb_Arena_HasInitialBlock(arena)) return false;
retry:
r = _upb_Arena_FindRoot(arena);
r = _upb_Arena_FindRoot(a);
if (upb_Atomic_CompareExchangeWeak(
&r.root->parent_or_count, &r.tagged_count,
_upb_Arena_TaggedFromRefcount(
@ -5437,8 +5494,25 @@ retry:
goto retry;
}
void upb_Arena_DecRefFor(upb_Arena* arena, const void* owner) {
upb_Arena_Free(arena);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner) { upb_Arena_Free(a); }
void UPB_PRIVATE(_upb_Arena_SwapIn)(upb_Arena* des, const upb_Arena* src) {
upb_ArenaInternal* desi = upb_Arena_Internal(des);
upb_ArenaInternal* srci = upb_Arena_Internal(src);
*des = *src;
desi->block_alloc = srci->block_alloc;
upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
upb_Atomic_Init(&desi->blocks, blocks);
}
void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
upb_ArenaInternal* desi = upb_Arena_Internal(des);
upb_ArenaInternal* srci = upb_Arena_Internal(src);
*des = *src;
upb_MemBlock* blocks = upb_Atomic_Load(&srci->blocks, memory_order_relaxed);
upb_Atomic_Store(&desi->blocks, blocks, memory_order_relaxed);
}
@ -13541,10 +13615,8 @@ static upb_DecodeStatus upb_Decoder_Decode(upb_Decoder* const decoder,
UPB_ASSERT(decoder->status != kUpb_DecodeStatus_Ok);
}
_upb_MemBlock* blocks =
upb_Atomic_Load(&decoder->arena.blocks, memory_order_relaxed);
arena->head = decoder->arena.head;
upb_Atomic_Store(&arena->blocks, blocks, memory_order_relaxed);
UPB_PRIVATE(_upb_Arena_SwapOut)(arena, &decoder->arena);
return decoder->status;
}
@ -13571,10 +13643,7 @@ upb_DecodeStatus upb_Decode(const char* buf, size_t size, void* msg,
// done. The temporary arena only needs to be able to handle allocation,
// not fuse or free, so it does not need many of the members to be initialized
// (particularly parent_or_count).
_upb_MemBlock* blocks = upb_Atomic_Load(&arena->blocks, memory_order_relaxed);
decoder.arena.head = arena->head;
decoder.arena.block_alloc = arena->block_alloc;
upb_Atomic_Init(&decoder.arena.blocks, blocks);
UPB_PRIVATE(_upb_Arena_SwapIn)(&decoder.arena, arena);
return upb_Decoder_Decode(&decoder, buf, msg, l, arena);
}
@ -14232,7 +14301,7 @@ UPB_FORCEINLINE
static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
int copy, char* data, size_t data_offset,
upb_StringView* dst) {
d->arena.head.UPB_PRIVATE(ptr) += copy;
d->arena.UPB_PRIVATE(ptr) += copy;
dst->data = data + data_offset;
UPB_UNPOISON_MEMORY_REGION(data, copy);
memcpy(data, ptr, copy);
@ -14264,7 +14333,7 @@ static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
ptr += tagbytes + 1; \
dst->size = size; \
\
buf = d->arena.head.UPB_PRIVATE(ptr); \
buf = d->arena.UPB_PRIVATE(ptr); \
arena_has = UPB_PRIVATE(_upb_ArenaHas)(&d->arena); \
common_has = UPB_MIN(arena_has, \
upb_EpsCopyInputStream_BytesAvailable(&d->input, ptr)); \
@ -14444,8 +14513,8 @@ upb_Message* decode_newmsg_ceil(upb_Decoder* d, const upb_MiniTable* m,
if (UPB_LIKELY(msg_ceil_bytes > 0 &&
UPB_PRIVATE(_upb_ArenaHas)(&d->arena) >= msg_ceil_bytes)) {
UPB_ASSERT(size <= (size_t)msg_ceil_bytes);
msg_data = d->arena.head.UPB_PRIVATE(ptr);
d->arena.head.UPB_PRIVATE(ptr) += size;
msg_data = d->arena.UPB_PRIVATE(ptr);
d->arena.UPB_PRIVATE(ptr) += size;
UPB_UNPOISON_MEMORY_REGION(msg_data, msg_ceil_bytes);
memset(msg_data, 0, msg_ceil_bytes);
UPB_POISON_MEMORY_REGION(msg_data + size, msg_ceil_bytes - size);

@ -557,7 +557,6 @@ UPB_INLINE bool upb_StringView_IsEqual(upb_StringView a, upb_StringView b) {
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#ifndef UPB_MEM_ALLOC_H_
@ -630,43 +629,48 @@ UPB_INLINE void upb_gfree(void* ptr) { upb_free(&upb_alloc_global, ptr); }
#endif /* UPB_MEM_ALLOC_H_ */
#ifndef UPB_MEM_INTERNAL_ARENA_H_
#define UPB_MEM_INTERNAL_ARENA_H_
#include <stddef.h>
#include <stdint.h>
#include <string.h>
// Must be last.
typedef struct upb_Arena upb_Arena;
// This is QUITE an ugly hack, which specifies the number of pointers needed
// to equal (or exceed) the storage required for one upb_Arena.
//
// We need this because the decoder inlines a upb_Arena for performance but
// the full struct is not visible outside of arena.c. Yes, I know, it's awful.
#define UPB_ARENA_SIZE_HACK 7
// LINT.IfChange(struct_definition)
typedef struct {
// LINT.IfChange(upb_Array)
struct upb_Arena {
char* UPB_ONLYBITS(ptr);
char* UPB_ONLYBITS(end);
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts:upb_Array)
#ifdef __cplusplus
extern "C" {
#endif
// Creates an arena from the given initial block (if any -- n may be 0).
// Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
// is a fixed-size arena and cannot grow.
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
void UPB_PRIVATE(_upb_Arena_SwapIn)(struct upb_Arena* des,
const struct upb_Arena* src);
void UPB_PRIVATE(_upb_Arena_SwapOut)(struct upb_Arena* des,
const struct upb_Arena* src);
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(upb_Arena* a) {
const _upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->UPB_ONLYBITS(end) - h->UPB_ONLYBITS(ptr));
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(const struct upb_Arena* a) {
return (size_t)(a->UPB_ONLYBITS(end) - a->UPB_ONLYBITS(ptr));
}
UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Malloc)(struct upb_Arena* a,
size_t size) {
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(struct upb_Arena * a, size_t size);
size = UPB_ALIGN_MALLOC(size);
const size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
@ -674,52 +678,34 @@ UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
}
// We have enough space to do a fast malloc.
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
void* ret = h->UPB_ONLYBITS(ptr);
void* ret = a->UPB_ONLYBITS(ptr);
UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
UPB_UNPOISON_MEMORY_REGION(ret, size);
h->UPB_ONLYBITS(ptr) += span;
a->UPB_ONLYBITS(ptr) += span;
return ret;
}
// Shrinks the last alloc from arena.
// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize ==
h->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
h->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
size_t size) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
UPB_INLINE void* UPB_PRIVATE(_upb_Arena_Realloc)(struct upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
bool is_most_recent_alloc =
(uintptr_t)ptr + oldsize == (uintptr_t)h->UPB_ONLYBITS(ptr);
(uintptr_t)ptr + oldsize == (uintptr_t)a->UPB_ONLYBITS(ptr);
if (is_most_recent_alloc) {
ptrdiff_t diff = size - oldsize;
if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
h->UPB_ONLYBITS(ptr) += diff;
a->UPB_ONLYBITS(ptr) += diff;
return ptr;
}
} else if (size <= oldsize) {
return ptr;
}
void* ret = upb_Arena_Malloc(a, size);
void* ret = UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
if (ret && oldsize > 0) {
memcpy(ret, ptr, UPB_MIN(oldsize, size));
@ -728,10 +714,69 @@ UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
return ret;
}
UPB_INLINE void UPB_PRIVATE(_upb_Arena_ShrinkLast)(struct upb_Arena* a,
void* ptr, size_t oldsize,
size_t size) {
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize ==
a->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
a->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
// Must be last.
typedef struct upb_Arena upb_Arena;
#ifdef __cplusplus
extern "C" {
#endif
// Creates an arena from the given initial block (if any -- n may be 0).
// Additional blocks will be allocated from |alloc|. If |alloc| is NULL, this
// is a fixed-size arena and cannot grow.
UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
UPB_API_INLINE upb_Arena* upb_Arena_New(void) {
return upb_Arena_Init(NULL, 0, &upb_alloc_global);
}
UPB_API_INLINE void* upb_Arena_Malloc(struct upb_Arena* a, size_t size) {
return UPB_PRIVATE(_upb_Arena_Malloc)(a, size);
}
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
size_t size) {
return UPB_PRIVATE(_upb_Arena_Realloc)(a, ptr, oldsize, size);
}
// Shrinks the last alloc from arena.
// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
// this was not the last alloc.
UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
size_t oldsize, size_t size) {
return UPB_PRIVATE(_upb_Arena_ShrinkLast)(a, ptr, oldsize, size);
}
#ifdef __cplusplus
} /* extern "C" */
#endif
@ -11926,48 +11971,6 @@ double _upb_NoLocaleStrtod(const char *str, char **endptr);
#endif /* UPB_LEX_STRTOD_H_ */
#ifndef UPB_MEM_INTERNAL_ARENA_H_
#define UPB_MEM_INTERNAL_ARENA_H_
// Must be last.
typedef struct _upb_MemBlock _upb_MemBlock;
// LINT.IfChange(struct_definition)
struct upb_Arena {
_upb_ArenaHead head;
// upb_alloc* together with a low bit which signals if there is an initial
// block.
uintptr_t block_alloc;
// When multiple arenas are fused together, each arena points to a parent
// arena (root points to itself). The root tracks how many live arenas
// reference it.
// The low bit is tagged:
// 0: pointer to parent
// 1: count, left shifted by one
UPB_ATOMIC(uintptr_t) parent_or_count;
// All nodes that are fused together are in a singly-linked list.
UPB_ATOMIC(upb_Arena*) next; // NULL at end of list.
// The last element of the linked list. This is present only as an
// optimization, so that we do not have to iterate over all members for every
// fuse. Only significant for an arena root. In other cases it is ignored.
UPB_ATOMIC(upb_Arena*) tail; // == self when no other list members.
// Linked list of blocks to free/cleanup. Atomic only for the benefit of
// upb_Arena_SpaceAllocated().
UPB_ATOMIC(_upb_MemBlock*) blocks;
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
#ifndef UPB_PORT_ATOMIC_H_
#define UPB_PORT_ATOMIC_H_
@ -13183,7 +13186,10 @@ typedef struct upb_Decoder {
uint32_t end_group; // field number of END_GROUP tag, else DECODE_NOGROUP.
uint16_t options;
bool missing_required;
upb_Arena arena;
union {
upb_Arena arena;
void* foo[UPB_ARENA_SIZE_HACK];
};
upb_DecodeStatus status;
jmp_buf err;
