Auto-generate files after cl/590419408

pull/15064/head
Protobuf Team Bot 1 year ago
parent 088fee7f28
commit 3d597c265a
  1. php/ext/google/protobuf/php-upb.c (132 changes)
  2. php/ext/google/protobuf/php-upb.h (87 changes)
  3. ruby/ext/google/protobuf_c/ruby-upb.c (132 changes)
  4. ruby/ext/google/protobuf_c/ruby-upb.h (87 changes)

@ -5521,6 +5521,9 @@ static void* upb_global_allocfunc(upb_alloc* alloc, void* ptr, size_t oldsize,
upb_alloc upb_alloc_global = {&upb_global_allocfunc};
#include <stddef.h>
#include <stdint.h>
// Must be last.
@ -5531,15 +5534,59 @@ struct _upb_MemBlock {
// Data follows.
};
static const size_t memblock_reserve =
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
typedef struct _upb_ArenaRoot {
typedef struct {
upb_Arena* root;
uintptr_t tagged_count;
} _upb_ArenaRoot;
} upb_ArenaRoot;
static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
static bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
static uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
static uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
static upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
}
static uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
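
A single atomic word, parent_or_count, holds either a refcount or a parent pointer; bit 0 is the discriminator, which works because upb_Arena objects are always at least 2-byte aligned. A minimal standalone sketch of the same encoding, using plain integers rather than the upb types:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// Standalone model of the tagging scheme above: bit 0 set means "refcount",
// bit 0 clear means "parent pointer" (object pointers are always even).
static uintptr_t tag_refcount(uintptr_t rc) { return (rc << 1) | 1; }
static uintptr_t refcount_from_tag(uintptr_t v) { return v >> 1; }

int main(void) {
  uintptr_t poc = tag_refcount(3);
  assert((poc & 1) == 1);               // recognizably a refcount
  assert(refcount_from_tag(poc) == 3);  // round-trips
  static int obj;                       // any sufficiently aligned object
  uintptr_t as_ptr = (uintptr_t)&obj;
  assert((as_ptr & 1) == 0);            // recognizably a pointer
  printf("tagged=%#zx refcount=%zu\n", (size_t)poc, (size_t)refcount_from_tag(poc));
  return 0;
}
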
static upb_alloc* _upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
static bool _upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
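
The same low-bit trick packs the block allocator: block_alloc stores the upb_alloc pointer with the has-initial-block flag in bit 0. A hedged standalone sketch (fake_alloc is a placeholder type, not part of upb):

#include <assert.h>
#include <stdint.h>

typedef struct { int unused; } fake_alloc;  // stand-in for upb_alloc

// Same trick as _upb_Arena_MakeBlockAlloc / _upb_Arena_BlockAlloc: the
// "has initial block" flag rides in bit 0 of the allocator pointer.
static uintptr_t pack_alloc(fake_alloc* a, int has_initial) {
  uintptr_t u = (uintptr_t)a;
  assert((u & 1) == 0);  // allocator objects are aligned, so bit 0 is free
  return u | (has_initial ? 1 : 0);
}
static fake_alloc* unpack_alloc(uintptr_t v) { return (fake_alloc*)(v & ~(uintptr_t)1); }
static int unpack_has_initial(uintptr_t v) { return (int)(v & 1); }

int main(void) {
  static fake_alloc ga;
  uintptr_t packed = pack_alloc(&ga, 1);
  assert(unpack_alloc(packed) == &ga);
  assert(unpack_has_initial(packed) == 1);
  return 0;
}
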
static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
@ -5573,7 +5620,7 @@ static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
a = next;
poc = next_poc;
}
return (_upb_ArenaRoot){.root = a, .tagged_count = poc};
return (upb_ArenaRoot){.root = a, .tagged_count = poc};
}
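
_upb_Arena_FindRoot follows parent pointers until it reaches the arena whose parent_or_count is a tagged refcount; that arena is the root of the fused group (the elided loop body also performs path compression). A simplified, single-threaded sketch of the walk:

#include <stdint.h>

typedef struct fake_arena { uintptr_t parent_or_count; } fake_arena;  // stand-in

// Roots hold a tagged refcount (odd value); children hold a parent pointer.
static fake_arena* find_root(fake_arena* a) {
  while ((a->parent_or_count & 1) == 0) {
    a = (fake_arena*)a->parent_or_count;  // follow the parent pointer
  }
  return a;
}

int main(void) {
  fake_arena root = {(1u << 1) | 1};               // refcount of 1
  fake_arena child = {(uintptr_t)&root};
  fake_arena grandchild = {(uintptr_t)&child};
  return find_root(&grandchild) == &root ? 0 : 1;  // exit code 0 on success
}
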
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
@ -5604,7 +5651,7 @@ uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
return _upb_Arena_RefCountFromTagged(poc);
}
static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
// Insert into linked list.
@ -5612,37 +5659,36 @@ static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->head.UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
UPB_POISON_MEMORY_REGION(a->head.UPB_PRIVATE(ptr),
a->head.UPB_PRIVATE(end) - a->head.UPB_PRIVATE(ptr));
}
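
kUpb_MemblockReserve is just sizeof(_upb_MemBlock) rounded up to the malloc alignment, so usable data begins at an aligned offset inside every block. A small worked example of that arithmetic (the 24-byte header and 16-byte alignment are illustrative values, not the real ones):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void) {
  size_t header = 24, align = 16, block = 4096;  // example values only
  size_t reserve = ALIGN_UP(header, align);      // 32
  printf("reserve=%zu usable=%zu\n", reserve, block - reserve);  // 32, 4064
  return 0;
}
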
static bool upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(upb_Arena_BlockAlloc(a), block_size);
size_t block_size = UPB_MAX(size, last_size * 2) + kUpb_MemblockReserve;
_upb_MemBlock* block = upb_malloc(_upb_Arena_BlockAlloc(a), block_size);
if (!block) return false;
upb_Arena_AddBlock(a, block, block_size);
_upb_Arena_AddBlock(a, block, block_size);
UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
return true;
}
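
Block sizes grow geometrically: each new block is at least double the previous block, or the requested size if that is larger, plus the header reserve. A toy calculation under an assumed 32-byte reserve:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
  size_t reserve = 32;     // stand-in for kUpb_MemblockReserve
  size_t last_size = 128;  // seed used when no block exists yet
  size_t small = MAX((size_t)64, last_size * 2) + reserve;  // 288
  size_t large = MAX((size_t)10000, small * 2) + reserve;   // 10032
  printf("small request -> %zu bytes, large request -> %zu bytes\n", small, large);
  return 0;
}
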
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
if (!upb_Arena_AllocBlock(a, size)) return NULL; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= size);
return upb_Arena_Malloc(a, size);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
if (!_upb_Arena_AllocBlock(a, size)) return NULL; // OOM
return upb_Arena_Malloc(a, size - UPB_ASAN_GUARD_SIZE);
}
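
The subtraction of UPB_ASAN_GUARD_SIZE is the interesting change here: upb_Arena_Malloc now hands the slow path the full span (size plus guard), and the slow path strips the guard back off before re-entering the fast path so it is not counted twice. A hedged model of that handoff (GUARD and the byte counts are made-up values):

#include <stdio.h>

enum { GUARD = 8 };            // stand-in for UPB_ASAN_GUARD_SIZE (0 without ASAN)
static size_t remaining = 16;  // bytes left in the current block

static size_t slow_malloc(size_t span);

static size_t arena_malloc(size_t size) {
  size_t span = size + GUARD;                      // guard added once, here
  if (remaining < span) return slow_malloc(span);  // pass the full span down
  remaining -= span;                               // fast path: bump the pointer
  return size;                                     // caller sees `size` usable bytes
}

static size_t slow_malloc(size_t span) {
  remaining = span * 2;                // pretend a big-enough block was allocated
  return arena_malloc(span - GUARD);   // strip the guard before retrying
}

int main(void) {
  printf("usable=%zu\n", arena_malloc(100));  // took the slow path exactly once
  return 0;
}
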
/* Public Arena API ***********************************************************/
static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + kUpb_MemblockReserve;
upb_Arena* a;
/* We need to malloc the initial block. */
// We need to malloc the initial block.
char* mem;
size_t n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
@ -5652,13 +5698,13 @@ static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 0);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
upb_Arena_AddBlock(a, mem, n);
_upb_Arena_AddBlock(a, mem, n);
return a;
}
@ -5679,7 +5725,7 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
return upb_Arena_InitSlow(alloc);
return _upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
@ -5688,21 +5734,21 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(*a), char);
return a;
}
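
From the caller's side nothing changes in this commit; only internal names gained _upb / UPB_PRIVATE prefixes. A minimal usage sketch of the two upb_Arena_Init modes, assuming it is compiled against the amalgamated php-upb.h in this directory:

#include "php-upb.h"  // assumption: the amalgamated header

int main(void) {
  // Heap-backed arena: with mem == NULL this goes through _upb_Arena_InitSlow.
  upb_Arena* heap_arena = upb_Arena_Init(NULL, 0, &upb_alloc_global);

  // Arena carved out of a caller-provided initial block. Such arenas cannot
  // be fused or ref-counted externally (see the HasInitialBlock checks below).
  char buf[4096];
  upb_Arena* stack_arena = upb_Arena_Init(buf, sizeof(buf), &upb_alloc_global);

  void* p = upb_Arena_Malloc(stack_arena, 128);
  (void)p;  // p stays valid until the arena is freed
  upb_Arena_Free(stack_arena);
  upb_Arena_Free(heap_arena);
  return 0;
}
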
static void arena_dofree(upb_Arena* a) {
static void _upb_Arena_DoFree(upb_Arena* a) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
while (a != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = upb_Arena_BlockAlloc(a);
upb_alloc* block_alloc = _upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
@ -5727,7 +5773,7 @@ retry:
// expensive than direct loads. As an optimization, we only do RMW ops
// when we need to update things for other threads to see.
if (poc == _upb_Arena_TaggedFromRefcount(1)) {
arena_dofree(a);
_upb_Arena_DoFree(a);
return;
}
@ -5778,14 +5824,14 @@ static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
// In parent pointer mode, it may change what pointer it refers to in the
// tree, but it will always approach a root. Any operation that walks the
// tree to the root may collapse levels of the tree concurrently.
_upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
_upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
if (r1.root == r2.root) return r1.root; // Already fused.
// Avoid cycles by always fusing into the root with the lower address.
if ((uintptr_t)r1.root > (uintptr_t)r2.root) {
_upb_ArenaRoot tmp = r1;
upb_ArenaRoot tmp = r1;
r1 = r2;
r2 = tmp;
}
@ -5845,7 +5891,7 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (upb_Arena_HasInitialBlock(a1) || upb_Arena_HasInitialBlock(a2)) {
if (_upb_Arena_HasInitialBlock(a1) || _upb_Arena_HasInitialBlock(a2)) {
return false;
}
@ -5860,8 +5906,8 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
}
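
A hedged usage sketch of fusing, again against the amalgamated header: only heap-backed arenas can fuse, and after a successful fuse, memory allocated from either arena stays alive until both have been freed.

#include "php-upb.h"  // assumption: the amalgamated header

int main(void) {
  upb_Arena* a = upb_Arena_Init(NULL, 0, &upb_alloc_global);
  upb_Arena* b = upb_Arena_Init(NULL, 0, &upb_alloc_global);

  if (upb_Arena_Fuse(a, b)) {
    void* p = upb_Arena_Malloc(b, 64);
    upb_Arena_Free(b);  // the fused group lives on: `a` still holds a ref
    (void)p;            // so p is still valid here
  }
  upb_Arena_Free(a);
  return 0;
}
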
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner) {
_upb_ArenaRoot r;
if (upb_Arena_HasInitialBlock(arena)) return false;
upb_ArenaRoot r;
if (_upb_Arena_HasInitialBlock(arena)) return false;
retry:
r = _upb_Arena_FindRoot(arena);
@ -14639,7 +14685,7 @@ UPB_FORCEINLINE
static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
int copy, char* data, size_t data_offset,
upb_StringView* dst) {
d->arena.head.ptr += copy;
d->arena.head.UPB_PRIVATE(ptr) += copy;
dst->data = data + data_offset;
UPB_UNPOISON_MEMORY_REGION(data, copy);
memcpy(data, ptr, copy);
@ -14671,8 +14717,8 @@ static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
ptr += tagbytes + 1; \
dst->size = size; \
\
buf = d->arena.head.ptr; \
arena_has = _upb_ArenaHas(&d->arena); \
buf = d->arena.head.UPB_PRIVATE(ptr); \
arena_has = UPB_PRIVATE(_upb_ArenaHas)(&d->arena); \
common_has = UPB_MIN(arena_has, \
upb_EpsCopyInputStream_BytesAvailable(&d->input, ptr)); \
\
@ -14849,10 +14895,10 @@ upb_Message* decode_newmsg_ceil(upb_Decoder* d, const upb_MiniTable* m,
size_t size = m->UPB_PRIVATE(size) + sizeof(upb_Message_Internal);
char* msg_data;
if (UPB_LIKELY(msg_ceil_bytes > 0 &&
_upb_ArenaHas(&d->arena) >= msg_ceil_bytes)) {
UPB_PRIVATE(_upb_ArenaHas)(&d->arena) >= msg_ceil_bytes)) {
UPB_ASSERT(size <= (size_t)msg_ceil_bytes);
msg_data = d->arena.head.ptr;
d->arena.head.ptr += size;
msg_data = d->arena.head.UPB_PRIVATE(ptr);
d->arena.head.UPB_PRIVATE(ptr) += size;
UPB_UNPOISON_MEMORY_REGION(msg_data, msg_ceil_bytes);
memset(msg_data, 0, msg_ceil_bytes);
UPB_POISON_MEMORY_REGION(msg_data + size, msg_ceil_bytes - size);
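
decode_newmsg_ceil grabs a fixed ceiling straight off the arena head when enough room is left, zeroes the whole ceiling in one go (a fixed-size memset the compiler can unroll), and re-poisons the unused tail. A rough standalone model of the bookkeeping, with invented sizes and no ASAN macros:

#include <stdio.h>
#include <string.h>

int main(void) {
  char block[256];
  char* head_ptr = block;
  char* head_end = block + sizeof(block);

  size_t msg_size = 40, ceil = 64;  // invented example sizes
  if ((size_t)(head_end - head_ptr) >= ceil) {
    char* msg = head_ptr;
    head_ptr += msg_size;           // only the real size is consumed
    memset(msg, 0, ceil);           // one fixed-size memset over the ceiling
    // In the real code the bytes [msg + msg_size, msg + ceil) are re-poisoned.
    printf("fast path: used %zu of a %zu-byte ceiling\n", msg_size, ceil);
  }
  return 0;
}
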

@ -634,7 +634,8 @@ typedef struct upb_Arena upb_Arena;
// LINT.IfChange(struct_definition)
typedef struct {
char *ptr, *end;
char* UPB_ONLYBITS(ptr);
char* UPB_ONLYBITS(end);
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
@ -650,33 +651,34 @@ UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* arena, const void* owner);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);
size_t upb_Arena_SpaceAllocated(upb_Arena* arena);
uint32_t upb_Arena_DebugRefCount(upb_Arena* arena);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size);
UPB_INLINE size_t _upb_ArenaHas(upb_Arena* a) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->end - h->ptr);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(upb_Arena* a) {
const _upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->UPB_ONLYBITS(end) - h->UPB_ONLYBITS(ptr));
}
UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
size = UPB_ALIGN_MALLOC(size);
size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(_upb_ArenaHas(a) < span)) {
return _upb_Arena_SlowMalloc(a, size);
const size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
return UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
}
// We have enough space to do a fast malloc.
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
void* ret = h->ptr;
void* ret = h->UPB_ONLYBITS(ptr);
UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
UPB_UNPOISON_MEMORY_REGION(ret, size);
h->ptr += span;
h->UPB_ONLYBITS(ptr) += span;
return ret;
}
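
upb_Arena_Malloc rounds every request up to UPB_ALIGN_MALLOC before bumping the pointer, so consecutive allocations stay aligned even for odd sizes. A small check of that property, assuming the default 8-byte malloc alignment:

#include <stdint.h>
#include "php-upb.h"  // assumption: the amalgamated header

int main(void) {
  upb_Arena* a = upb_Arena_Init(NULL, 0, &upb_alloc_global);
  void* p1 = upb_Arena_Malloc(a, 13);  // odd size, rounded up internally
  void* p2 = upb_Arena_Malloc(a, 1);
  int aligned = ((uintptr_t)p1 % 8 == 0) && ((uintptr_t)p2 % 8 == 0);
  upb_Arena_Free(a);
  return aligned ? 0 : 1;
}
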
@ -691,9 +693,10 @@ UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize == h->ptr - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT((char*)ptr + oldsize ==
h->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
h->ptr = (char*)ptr + size;
h->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
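
upb_Arena_ShrinkLast simply moves the head pointer back, so it is only legal on the most recent allocation. A hedged over-allocate-then-trim sketch:

#include <stdio.h>
#include "php-upb.h"  // assumption: the amalgamated header

int main(void) {
  upb_Arena* a = upb_Arena_Init(NULL, 0, &upb_alloc_global);
  // Over-allocate, fill, then return the unused tail to the arena. This is
  // only valid while `buf` is still the last allocation made from `a`.
  char* buf = upb_Arena_Malloc(a, 256);
  size_t used = (size_t)snprintf(buf, 256, "hello");
  upb_Arena_ShrinkLast(a, buf, 256, used + 1);
  upb_Arena_Free(a);
  return 0;
}
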
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
@ -701,12 +704,13 @@ UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
bool is_most_recent_alloc = (uintptr_t)ptr + oldsize == (uintptr_t)h->ptr;
bool is_most_recent_alloc =
(uintptr_t)ptr + oldsize == (uintptr_t)h->UPB_ONLYBITS(ptr);
if (is_most_recent_alloc) {
ptrdiff_t diff = size - oldsize;
if ((ptrdiff_t)_upb_ArenaHas(a) >= diff) {
h->ptr += diff;
if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
h->UPB_ONLYBITS(ptr) += diff;
return ptr;
}
} else if (size <= oldsize) {
@ -12301,51 +12305,6 @@ struct upb_Arena {
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
UPB_INLINE bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
UPB_INLINE bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
UPB_INLINE uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
UPB_INLINE upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
UPB_INLINE upb_alloc* upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
UPB_INLINE uintptr_t upb_Arena_MakeBlockAlloc(upb_alloc* alloc,
bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
UPB_INLINE bool upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */

@ -5035,6 +5035,9 @@ static void* upb_global_allocfunc(upb_alloc* alloc, void* ptr, size_t oldsize,
upb_alloc upb_alloc_global = {&upb_global_allocfunc};
#include <stddef.h>
#include <stdint.h>
// Must be last.
@ -5045,15 +5048,59 @@ struct _upb_MemBlock {
// Data follows.
};
static const size_t memblock_reserve =
static const size_t kUpb_MemblockReserve =
UPB_ALIGN_UP(sizeof(_upb_MemBlock), UPB_MALLOC_ALIGN);
typedef struct _upb_ArenaRoot {
typedef struct {
upb_Arena* root;
uintptr_t tagged_count;
} _upb_ArenaRoot;
} upb_ArenaRoot;
static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
static bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
static uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
static uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
static upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
}
static uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
static upb_alloc* _upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
static uintptr_t _upb_Arena_MakeBlockAlloc(upb_alloc* alloc, bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
static bool _upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
static upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
uintptr_t poc = upb_Atomic_Load(&a->parent_or_count, memory_order_acquire);
while (_upb_Arena_IsTaggedPointer(poc)) {
upb_Arena* next = _upb_Arena_PointerFromTagged(poc);
@ -5087,7 +5134,7 @@ static _upb_ArenaRoot _upb_Arena_FindRoot(upb_Arena* a) {
a = next;
poc = next_poc;
}
return (_upb_ArenaRoot){.root = a, .tagged_count = poc};
return (upb_ArenaRoot){.root = a, .tagged_count = poc};
}
size_t upb_Arena_SpaceAllocated(upb_Arena* arena) {
@ -5118,7 +5165,7 @@ uint32_t upb_Arena_DebugRefCount(upb_Arena* a) {
return _upb_Arena_RefCountFromTagged(poc);
}
static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
_upb_MemBlock* block = ptr;
// Insert into linked list.
@ -5126,37 +5173,36 @@ static void upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
upb_Atomic_Init(&block->next, a->blocks);
upb_Atomic_Store(&a->blocks, block, memory_order_release);
a->head.ptr = UPB_PTR_AT(block, memblock_reserve, char);
a->head.end = UPB_PTR_AT(block, size, char);
a->head.UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
UPB_POISON_MEMORY_REGION(a->head.ptr, a->head.end - a->head.ptr);
UPB_POISON_MEMORY_REGION(a->head.UPB_PRIVATE(ptr),
a->head.UPB_PRIVATE(end) - a->head.UPB_PRIVATE(ptr));
}
static bool upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
if (!a->block_alloc) return false;
_upb_MemBlock* last_block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
size_t last_size = last_block != NULL ? last_block->size : 128;
size_t block_size = UPB_MAX(size, last_size * 2) + memblock_reserve;
_upb_MemBlock* block = upb_malloc(upb_Arena_BlockAlloc(a), block_size);
size_t block_size = UPB_MAX(size, last_size * 2) + kUpb_MemblockReserve;
_upb_MemBlock* block = upb_malloc(_upb_Arena_BlockAlloc(a), block_size);
if (!block) return false;
upb_Arena_AddBlock(a, block, block_size);
_upb_Arena_AddBlock(a, block, block_size);
UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
return true;
}
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size) {
if (!upb_Arena_AllocBlock(a, size)) return NULL; /* Out of memory. */
UPB_ASSERT(_upb_ArenaHas(a) >= size);
return upb_Arena_Malloc(a, size);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
if (!_upb_Arena_AllocBlock(a, size)) return NULL; // OOM
return upb_Arena_Malloc(a, size - UPB_ASAN_GUARD_SIZE);
}
/* Public Arena API ***********************************************************/
static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + memblock_reserve;
static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
const size_t first_block_overhead = sizeof(upb_Arena) + kUpb_MemblockReserve;
upb_Arena* a;
/* We need to malloc the initial block. */
// We need to malloc the initial block.
char* mem;
size_t n = first_block_overhead + 256;
if (!alloc || !(mem = upb_malloc(alloc, n))) {
@ -5166,13 +5212,13 @@ static upb_Arena* upb_Arena_InitSlow(upb_alloc* alloc) {
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
n -= sizeof(*a);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 0);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
upb_Atomic_Init(&a->parent_or_count, _upb_Arena_TaggedFromRefcount(1));
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
upb_Arena_AddBlock(a, mem, n);
_upb_Arena_AddBlock(a, mem, n);
return a;
}
@ -5193,7 +5239,7 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
n = UPB_ALIGN_DOWN(n, UPB_ALIGN_OF(upb_Arena));
if (UPB_UNLIKELY(n < sizeof(upb_Arena))) {
return upb_Arena_InitSlow(alloc);
return _upb_Arena_InitSlow(alloc);
}
a = UPB_PTR_AT(mem, n - sizeof(*a), upb_Arena);
@ -5202,21 +5248,21 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
upb_Atomic_Init(&a->next, NULL);
upb_Atomic_Init(&a->tail, a);
upb_Atomic_Init(&a->blocks, NULL);
a->block_alloc = upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.ptr = mem;
a->head.end = UPB_PTR_AT(mem, n - sizeof(*a), char);
a->block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
a->head.UPB_PRIVATE(ptr) = mem;
a->head.UPB_PRIVATE(end) = UPB_PTR_AT(mem, n - sizeof(*a), char);
return a;
}
static void arena_dofree(upb_Arena* a) {
static void _upb_Arena_DoFree(upb_Arena* a) {
UPB_ASSERT(_upb_Arena_RefCountFromTagged(a->parent_or_count) == 1);
while (a != NULL) {
// Load first since arena itself is likely from one of its blocks.
upb_Arena* next_arena =
(upb_Arena*)upb_Atomic_Load(&a->next, memory_order_acquire);
upb_alloc* block_alloc = upb_Arena_BlockAlloc(a);
upb_alloc* block_alloc = _upb_Arena_BlockAlloc(a);
_upb_MemBlock* block = upb_Atomic_Load(&a->blocks, memory_order_acquire);
while (block != NULL) {
// Load first since we are deleting block.
@ -5241,7 +5287,7 @@ retry:
// expensive than direct loads. As an optimization, we only do RMW ops
// when we need to update things for other threads to see.
if (poc == _upb_Arena_TaggedFromRefcount(1)) {
arena_dofree(a);
_upb_Arena_DoFree(a);
return;
}
@ -5292,14 +5338,14 @@ static upb_Arena* _upb_Arena_DoFuse(upb_Arena* a1, upb_Arena* a2,
// In parent pointer mode, it may change what pointer it refers to in the
// tree, but it will always approach a root. Any operation that walks the
// tree to the root may collapse levels of the tree concurrently.
_upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
_upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
upb_ArenaRoot r1 = _upb_Arena_FindRoot(a1);
upb_ArenaRoot r2 = _upb_Arena_FindRoot(a2);
if (r1.root == r2.root) return r1.root; // Already fused.
// Avoid cycles by always fusing into the root with the lower address.
if ((uintptr_t)r1.root > (uintptr_t)r2.root) {
_upb_ArenaRoot tmp = r1;
upb_ArenaRoot tmp = r1;
r1 = r2;
r2 = tmp;
}
@ -5359,7 +5405,7 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
// Do not fuse initial blocks since we cannot lifetime extend them.
// Any other fuse scenario is allowed.
if (upb_Arena_HasInitialBlock(a1) || upb_Arena_HasInitialBlock(a2)) {
if (_upb_Arena_HasInitialBlock(a1) || _upb_Arena_HasInitialBlock(a2)) {
return false;
}
@ -5374,8 +5420,8 @@ bool upb_Arena_Fuse(upb_Arena* a1, upb_Arena* a2) {
}
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner) {
_upb_ArenaRoot r;
if (upb_Arena_HasInitialBlock(arena)) return false;
upb_ArenaRoot r;
if (_upb_Arena_HasInitialBlock(arena)) return false;
retry:
r = _upb_Arena_FindRoot(arena);
@ -14153,7 +14199,7 @@ UPB_FORCEINLINE
static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
int copy, char* data, size_t data_offset,
upb_StringView* dst) {
d->arena.head.ptr += copy;
d->arena.head.UPB_PRIVATE(ptr) += copy;
dst->data = data + data_offset;
UPB_UNPOISON_MEMORY_REGION(data, copy);
memcpy(data, ptr, copy);
@ -14185,8 +14231,8 @@ static void fastdecode_docopy(upb_Decoder* d, const char* ptr, uint32_t size,
ptr += tagbytes + 1; \
dst->size = size; \
\
buf = d->arena.head.ptr; \
arena_has = _upb_ArenaHas(&d->arena); \
buf = d->arena.head.UPB_PRIVATE(ptr); \
arena_has = UPB_PRIVATE(_upb_ArenaHas)(&d->arena); \
common_has = UPB_MIN(arena_has, \
upb_EpsCopyInputStream_BytesAvailable(&d->input, ptr)); \
\
@ -14363,10 +14409,10 @@ upb_Message* decode_newmsg_ceil(upb_Decoder* d, const upb_MiniTable* m,
size_t size = m->UPB_PRIVATE(size) + sizeof(upb_Message_Internal);
char* msg_data;
if (UPB_LIKELY(msg_ceil_bytes > 0 &&
_upb_ArenaHas(&d->arena) >= msg_ceil_bytes)) {
UPB_PRIVATE(_upb_ArenaHas)(&d->arena) >= msg_ceil_bytes)) {
UPB_ASSERT(size <= (size_t)msg_ceil_bytes);
msg_data = d->arena.head.ptr;
d->arena.head.ptr += size;
msg_data = d->arena.head.UPB_PRIVATE(ptr);
d->arena.head.UPB_PRIVATE(ptr) += size;
UPB_UNPOISON_MEMORY_REGION(msg_data, msg_ceil_bytes);
memset(msg_data, 0, msg_ceil_bytes);
UPB_POISON_MEMORY_REGION(msg_data + size, msg_ceil_bytes - size);

@ -636,7 +636,8 @@ typedef struct upb_Arena upb_Arena;
// LINT.IfChange(struct_definition)
typedef struct {
char *ptr, *end;
char* UPB_ONLYBITS(ptr);
char* UPB_ONLYBITS(end);
} _upb_ArenaHead;
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
@ -652,33 +653,34 @@ UPB_API upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc);
UPB_API void upb_Arena_Free(upb_Arena* a);
UPB_API bool upb_Arena_Fuse(upb_Arena* a, upb_Arena* b);
bool upb_Arena_IncRefFor(upb_Arena* arena, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* arena, const void* owner);
bool upb_Arena_IncRefFor(upb_Arena* a, const void* owner);
void upb_Arena_DecRefFor(upb_Arena* a, const void* owner);
void* _upb_Arena_SlowMalloc(upb_Arena* a, size_t size);
size_t upb_Arena_SpaceAllocated(upb_Arena* arena);
uint32_t upb_Arena_DebugRefCount(upb_Arena* arena);
void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size);
UPB_INLINE size_t _upb_ArenaHas(upb_Arena* a) {
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->end - h->ptr);
size_t upb_Arena_SpaceAllocated(upb_Arena* a);
uint32_t upb_Arena_DebugRefCount(upb_Arena* a);
UPB_INLINE size_t UPB_PRIVATE(_upb_ArenaHas)(upb_Arena* a) {
const _upb_ArenaHead* h = (_upb_ArenaHead*)a;
return (size_t)(h->UPB_ONLYBITS(end) - h->UPB_ONLYBITS(ptr));
}
UPB_API_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
size = UPB_ALIGN_MALLOC(size);
size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(_upb_ArenaHas(a) < span)) {
return _upb_Arena_SlowMalloc(a, size);
const size_t span = size + UPB_ASAN_GUARD_SIZE;
if (UPB_UNLIKELY(UPB_PRIVATE(_upb_ArenaHas)(a) < span)) {
return UPB_PRIVATE(_upb_Arena_SlowMalloc)(a, span);
}
// We have enough space to do a fast malloc.
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
void* ret = h->ptr;
void* ret = h->UPB_ONLYBITS(ptr);
UPB_ASSERT(UPB_ALIGN_MALLOC((uintptr_t)ret) == (uintptr_t)ret);
UPB_ASSERT(UPB_ALIGN_MALLOC(size) == size);
UPB_UNPOISON_MEMORY_REGION(ret, size);
h->ptr += span;
h->UPB_ONLYBITS(ptr) += span;
return ret;
}
@ -693,9 +695,10 @@ UPB_API_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr,
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
// Must be the last alloc.
UPB_ASSERT((char*)ptr + oldsize == h->ptr - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT((char*)ptr + oldsize ==
h->UPB_ONLYBITS(ptr) - UPB_ASAN_GUARD_SIZE);
UPB_ASSERT(size <= oldsize);
h->ptr = (char*)ptr + size;
h->UPB_ONLYBITS(ptr) = (char*)ptr + size;
}
UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
@ -703,12 +706,13 @@ UPB_API_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
_upb_ArenaHead* h = (_upb_ArenaHead*)a;
oldsize = UPB_ALIGN_MALLOC(oldsize);
size = UPB_ALIGN_MALLOC(size);
bool is_most_recent_alloc = (uintptr_t)ptr + oldsize == (uintptr_t)h->ptr;
bool is_most_recent_alloc =
(uintptr_t)ptr + oldsize == (uintptr_t)h->UPB_ONLYBITS(ptr);
if (is_most_recent_alloc) {
ptrdiff_t diff = size - oldsize;
if ((ptrdiff_t)_upb_ArenaHas(a) >= diff) {
h->ptr += diff;
if ((ptrdiff_t)UPB_PRIVATE(_upb_ArenaHas)(a) >= diff) {
h->UPB_ONLYBITS(ptr) += diff;
return ptr;
}
} else if (size <= oldsize) {
@ -12073,51 +12077,6 @@ struct upb_Arena {
};
// LINT.ThenChange(//depot/google3/third_party/upb/bits/typescript/arena.ts)
UPB_INLINE bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 1;
}
UPB_INLINE bool _upb_Arena_IsTaggedPointer(uintptr_t parent_or_count) {
return (parent_or_count & 1) == 0;
}
UPB_INLINE uintptr_t _upb_Arena_RefCountFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count >> 1;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromRefcount(uintptr_t refcount) {
uintptr_t parent_or_count = (refcount << 1) | 1;
UPB_ASSERT(_upb_Arena_IsTaggedRefcount(parent_or_count));
return parent_or_count;
}
UPB_INLINE upb_Arena* _upb_Arena_PointerFromTagged(uintptr_t parent_or_count) {
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return (upb_Arena*)parent_or_count;
}
UPB_INLINE uintptr_t _upb_Arena_TaggedFromPointer(upb_Arena* a) {
uintptr_t parent_or_count = (uintptr_t)a;
UPB_ASSERT(_upb_Arena_IsTaggedPointer(parent_or_count));
return parent_or_count;
}
UPB_INLINE upb_alloc* upb_Arena_BlockAlloc(upb_Arena* arena) {
return (upb_alloc*)(arena->block_alloc & ~0x1);
}
UPB_INLINE uintptr_t upb_Arena_MakeBlockAlloc(upb_alloc* alloc,
bool has_initial) {
uintptr_t alloc_uint = (uintptr_t)alloc;
UPB_ASSERT((alloc_uint & 1) == 0);
return alloc_uint | (has_initial ? 1 : 0);
}
UPB_INLINE bool upb_Arena_HasInitialBlock(upb_Arena* arena) {
return arena->block_alloc & 0x1;
}
#endif /* UPB_MEM_INTERNAL_ARENA_H_ */
