Auto-generate files after cl/713536794

Branch: pull/19937/head
Protobuf Team Bot committed 4 months ago
parent 66de8e7c34
commit b0b7bf3d9a

Changed files:
  1. php/ext/google/protobuf/php-upb.c (25 changed lines)
  2. php/ext/google/protobuf/php-upb.h (5 changed lines)
  3. ruby/ext/google/protobuf_c/ruby-upb.c (25 changed lines)
  4. ruby/ext/google/protobuf_c/ruby-upb.h (5 changed lines)

php/ext/google/protobuf/php-upb.c

@@ -2932,7 +2932,7 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 typedef struct upb_MemBlock {
   struct upb_MemBlock* next;
-  uint32_t size;
+  size_t size;
   // Data follows.
 } upb_MemBlock;
@@ -3186,17 +3186,18 @@ uint32_t upb_Arena_DebugRefCount(const upb_Arena* a) {
   return (uint32_t)_upb_Arena_RefCountFromTagged(tagged);
 }
-static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
+static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t available,
+                                size_t block_size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
-  block->size = (uint32_t)size;
+  block->size = block_size;
   // Insert into linked list.
   block->next = ai->blocks;
   ai->blocks = block;
   a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
-  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
+  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, available, char);
   UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
                            a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
@@ -3226,7 +3227,7 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
       upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);
   if (!block) return false;
-  _upb_Arena_AddBlock(a, block, block_size);
+  _upb_Arena_AddBlock(a, block, block_size, block_size);
   // Atomic add not required here, as threads won't race allocating blocks, plus
   // atomic fetch-add is slower than load/add/store on arm devices compiled
   // targetting pre-v8.1. Relaxed order is safe as nothing depends on order of
@@ -3252,25 +3253,25 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
   // We need to malloc the initial block.
   char* mem;
-  size_t n = first_block_overhead + 256;
-  if (!alloc || !(mem = upb_malloc(alloc, n))) {
+  size_t block_size = first_block_overhead + 256;
+  if (!alloc || !(mem = upb_malloc(alloc, block_size))) {
     return NULL;
   }
-  a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
-  n -= sizeof(upb_ArenaState);
+  size_t available = block_size - sizeof(upb_ArenaState);
+  a = UPB_PTR_AT(mem, available, upb_ArenaState);
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.previous_or_tail,
                   _upb_Arena_TaggedFromTail(&a->body));
-  upb_Atomic_Init(&a->body.space_allocated, n);
+  upb_Atomic_Init(&a->body.space_allocated, block_size);
   a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
   UPB_TSAN_INIT_PUBLISHED(&a->body);
-  _upb_Arena_AddBlock(&a->head, mem, n);
+  _upb_Arena_AddBlock(&a->head, mem, available, block_size);
   return &a->head;
 }
@@ -3338,7 +3339,7 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
   while (block != NULL) {
     // Load first since we are deleting block.
     upb_MemBlock* next_block = block->next;
-    upb_free(block_alloc, block);
+    upb_free_sized(block_alloc, block, block->size);
     block = next_block;
   }
   if (alloc_cleanup != NULL) {
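Why _upb_Arena_AddBlock now takes two sizes: for ordinary blocks from _upb_Arena_AllocBlock the whole allocation is usable, so both arguments are block_size, but for the initial block in _upb_Arena_InitSlow the upb_ArenaState is carved out of the tail of the allocation. The bump region therefore ends at `available`, while the allocator handed out `block_size` bytes, and block->size must record the latter so the sized free on teardown reports the exact figure. A standalone sketch of that layout arithmetic (the concrete numbers below are hypothetical stand-ins, not values from the commit):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void) {
  // Hypothetical stand-ins; the real values are sizeof(upb_ArenaState) and
  // the first_block_overhead + 256 computed in _upb_Arena_InitSlow above.
  const size_t sizeof_arena_state = 96;
  const size_t first_block_overhead = 40;

  size_t block_size = first_block_overhead + 256;    // bytes from upb_malloc()
  size_t available = block_size - sizeof_arena_state;  // end of bump region

  // block->size records block_size so upb_free_sized() can hand the exact
  // count back to the allocator; the arena may only bump-allocate within the
  // first `available` bytes because the state struct occupies the tail.
  assert(available < block_size);
  printf("block_size=%zu available=%zu state_bytes=%zu\n", block_size,
         available, block_size - available);
  return 0;
}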

php/ext/google/protobuf/php-upb.h

@@ -609,6 +609,11 @@ UPB_INLINE void upb_free(upb_alloc* alloc, void* ptr) {
   alloc->func(alloc, ptr, 0, 0);
 }
+UPB_INLINE void upb_free_sized(upb_alloc* alloc, void* ptr, size_t size) {
+  UPB_ASSERT(alloc);
+  alloc->func(alloc, ptr, size, 0);
+}
 // The global allocator used by upb. Uses the standard malloc()/free().
 extern upb_alloc upb_alloc_global;
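The new header entry point is what makes the rest of the diff pay off: a upb_alloc implementation receives oldsize through its single func callback, and until now every arena teardown went through upb_free(), which hard-codes oldsize = 0. With upb_free_sized() a custom allocator can finally see how many bytes are being returned. Below is a sketch of one such allocator; CountingAlloc and counting_func are illustrative names of ours, and the upb_alloc/upb_alloc_func declarations simply mirror the ones in this header (a real build should include the generated header instead of redeclaring them):

#include <stdlib.h>
#include <stddef.h>

// Mirrors the upb_alloc interface from the header above; redeclared here
// only to keep the sketch self-contained.
typedef struct upb_alloc upb_alloc;
typedef void* upb_alloc_func(upb_alloc* alloc, void* ptr, size_t oldsize,
                             size_t size);
struct upb_alloc {
  upb_alloc_func* func;
};

// Illustrative allocator (our own names): tracks live bytes, which can only
// balance back to zero if frees arrive through upb_free_sized().
typedef struct {
  upb_alloc base;       // must be the first member so the cast below is valid
  size_t bytes_in_use;  // live-byte counter enabled by sized deallocation
} CountingAlloc;

static void* counting_func(upb_alloc* alloc, void* ptr, size_t oldsize,
                           size_t size) {
  CountingAlloc* c = (CountingAlloc*)alloc;
  c->bytes_in_use += size;
  c->bytes_in_use -= oldsize;  // with plain upb_free(), oldsize is 0 and
                               // freed bytes would never be subtracted
  if (size == 0) {             // free path
    free(ptr);
    return NULL;
  }
  return realloc(ptr, size);   // covers malloc (ptr == NULL) and realloc
}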

ruby/ext/google/protobuf_c/ruby-upb.c

@@ -2932,7 +2932,7 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 typedef struct upb_MemBlock {
   struct upb_MemBlock* next;
-  uint32_t size;
+  size_t size;
   // Data follows.
 } upb_MemBlock;
@@ -3186,17 +3186,18 @@ uint32_t upb_Arena_DebugRefCount(const upb_Arena* a) {
   return (uint32_t)_upb_Arena_RefCountFromTagged(tagged);
 }
-static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t size) {
+static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t available,
+                                size_t block_size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
-  block->size = (uint32_t)size;
+  block->size = block_size;
   // Insert into linked list.
   block->next = ai->blocks;
   ai->blocks = block;
   a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
-  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, size, char);
+  a->UPB_PRIVATE(end) = UPB_PTR_AT(block, available, char);
   UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
                            a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
@@ -3226,7 +3227,7 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
       upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);
   if (!block) return false;
-  _upb_Arena_AddBlock(a, block, block_size);
+  _upb_Arena_AddBlock(a, block, block_size, block_size);
   // Atomic add not required here, as threads won't race allocating blocks, plus
   // atomic fetch-add is slower than load/add/store on arm devices compiled
   // targetting pre-v8.1. Relaxed order is safe as nothing depends on order of
@@ -3252,25 +3253,25 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc) {
   // We need to malloc the initial block.
   char* mem;
-  size_t n = first_block_overhead + 256;
-  if (!alloc || !(mem = upb_malloc(alloc, n))) {
+  size_t block_size = first_block_overhead + 256;
+  if (!alloc || !(mem = upb_malloc(alloc, block_size))) {
     return NULL;
   }
-  a = UPB_PTR_AT(mem, n - sizeof(upb_ArenaState), upb_ArenaState);
-  n -= sizeof(upb_ArenaState);
+  size_t available = block_size - sizeof(upb_ArenaState);
+  a = UPB_PTR_AT(mem, available, upb_ArenaState);
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.previous_or_tail,
                   _upb_Arena_TaggedFromTail(&a->body));
-  upb_Atomic_Init(&a->body.space_allocated, n);
+  upb_Atomic_Init(&a->body.space_allocated, block_size);
   a->body.blocks = NULL;
   a->body.upb_alloc_cleanup = NULL;
   UPB_TSAN_INIT_PUBLISHED(&a->body);
-  _upb_Arena_AddBlock(&a->head, mem, n);
+  _upb_Arena_AddBlock(&a->head, mem, available, block_size);
   return &a->head;
 }
@@ -3338,7 +3339,7 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
   while (block != NULL) {
     // Load first since we are deleting block.
     upb_MemBlock* next_block = block->next;
-    upb_free(block_alloc, block);
+    upb_free_sized(block_alloc, block, block->size);
     block = next_block;
   }
   if (alloc_cleanup != NULL) {

ruby/ext/google/protobuf_c/ruby-upb.h

@@ -611,6 +611,11 @@ UPB_INLINE void upb_free(upb_alloc* alloc, void* ptr) {
   alloc->func(alloc, ptr, 0, 0);
 }
+UPB_INLINE void upb_free_sized(upb_alloc* alloc, void* ptr, size_t size) {
+  UPB_ASSERT(alloc);
+  alloc->func(alloc, ptr, size, 0);
+}
 // The global allocator used by upb. Uses the standard malloc()/free().
 extern upb_alloc upb_alloc_global;
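A usage sketch tying the pieces together, assuming the CountingAlloc from the earlier example. upb_Arena_Init() and upb_Arena_Free() are upb's public arena API; the claim that the counter balances to zero is our reading of the diff above (every block is now released via upb_free_sized() with its recorded block->size), not something the commit itself asserts:

#include <assert.h>

// Assumes CountingAlloc / counting_func from the sketch above plus the
// upb arena API (upb_Arena_Init, upb_Arena_Free).
void demo(void) {
  CountingAlloc counter = {{&counting_func}, 0};
  upb_Arena* arena = upb_Arena_Init(/*mem=*/NULL, /*n=*/0, &counter.base);
  // ... arena allocations happen here; each new block adds block_size ...
  upb_Arena_Free(arena);
  // Each block was freed via upb_free_sized(alloc, block, block->size), so
  // the live-byte counter can return to zero.
  assert(counter.bytes_in_use == 0);
}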
