Addressed PR comments.

pull/13171/head
Joshua Haberman 3 years ago
parent 8405436044
commit 7c4d12e856
  1. upb/mini_table.c (42 changed lines)
  2. upb/mini_table.hpp (4 changed lines)
  3. upb/msg.c (6 changed lines)
  4. upb/msg_internal.h (3 changed lines)
  5. upb/upb.h (26 changed lines)

--- a/upb/mini_table.c
+++ b/upb/mini_table.c
@@ -265,10 +265,13 @@ const upb_MiniTable_Field* upb_MiniTable_FindFieldByNumber(

 /** Data decoder **************************************************************/

+// Note: we sort by this number when calculating layout order.
 typedef enum {
-  kUpb_LayoutItemType_Field,       // Non-oneof field data.
-  kUpb_LayoutItemType_OneofField,  // Oneof field data.
   kUpb_LayoutItemType_OneofCase,   // Oneof case.
+  kUpb_LayoutItemType_OneofField,  // Oneof field data.
+  kUpb_LayoutItemType_Field,       // Non-oneof field data.
+
+  kUpb_LayoutItemType_Max = kUpb_LayoutItemType_Field,
 } upb_LayoutItemType;

 #define kUpb_LayoutItem_IndexSentinel ((uint16_t)-1)
@@ -621,32 +624,34 @@ static void upb_MtDecoder_ParseMessage(upb_MtDecoder* d, const char* data,
   upb_MtDecoder_Parse(d, data, len, d->fields, sizeof(*d->fields),
                       &d->table->field_count, &sub_count);

-  // Return unused memory from fields array.
-  UPB_ASSERT(d->table->field_count <= len);
-  UPB_ASSERT(
-      _upb_Arena_IsLastAlloc(d->arena, d->fields, sizeof(*d->fields) * len));
-  d->fields = upb_Arena_Realloc(d->arena, d->fields, sizeof(*d->fields) * len,
-                                sizeof(*d->fields) * d->table->field_count);
+  upb_Arena_ShrinkLast(d->arena, d->fields, sizeof(*d->fields) * len,
+                       sizeof(*d->fields) * d->table->field_count);
   d->table->fields = d->fields;

   upb_MtDecoder_AllocateSubs(d, sub_count);
 }

 #define UPB_COMPARE_INTEGERS(a, b) ((a) < (b) ? -1 : ((a) == (b) ? 0 : 1))
+#define UPB_COMBINE(rep, ty, idx) (((rep << type_bits) | ty) << idx_bits) | idx

 int upb_MtDecoder_CompareFields(const void* _a, const void* _b) {
   const upb_LayoutItem* a = _a;
   const upb_LayoutItem* b = _b;
   // Currently we just sort by:
-  //  1. rep (descending, so largest fields are first)
-  //  2. is_case (descending, so oneof cases are first)
-  //  2. field_number (ascending, so smallest numbers are first)
-  //
+  //  1. rep (smallest fields first)
+  //  2. type (oneof cases first)
+  //  3. field_number (smallest numbers first)
   // The main goal of this is to reduce space lost to padding.
-  if (a->rep != b->rep) return UPB_COMPARE_INTEGERS(a->rep, b->rep);
-  if (a->type != b->type) return UPB_COMPARE_INTEGERS(a->type, b->type);
-  return UPB_COMPARE_INTEGERS(b->field_index, a->field_index);
+  // Later we may have more subtle reasons to prefer a different ordering.
+  const int rep_bits = _upb_Log2Ceiling(kUpb_FieldRep_Max);
+  const int type_bits = _upb_Log2Ceiling(kUpb_LayoutItemType_Max);
+  const int idx_bits = (sizeof(a->field_index) * 8);
+  UPB_ASSERT(idx_bits + rep_bits + type_bits < 32);
+  uint32_t a_packed = UPB_COMBINE(a->rep, a->type, a->field_index);
+  uint32_t b_packed = UPB_COMBINE(b->rep, b->type, b->field_index);
+  return UPB_COMPARE_INTEGERS(a_packed, b_packed);
 }

+#undef UPB_COMBINE
 #undef UPB_COMPARE_INTEGERS

 static bool upb_MtDecoder_SortLayoutItems(upb_MtDecoder* d) {
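Note on the packed comparison above: placing rep in the most significant bits, then type, then field_index makes a single integer compare equivalent to a three-key lexicographic sort. A standalone sketch of the same trick, using hypothetical fixed bit widths rather than the rep_bits/type_bits that upb derives from kUpb_FieldRep_Max and kUpb_LayoutItemType_Max:

// Illustration only: a toy Item struct instead of upb_LayoutItem.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  uint8_t rep;           // 0..4, fits in 3 bits
  uint8_t type;          // 0..2, fits in 2 bits
  uint16_t field_index;  // 16 bits
} Item;

static uint32_t Pack(const Item* it) {
  // rep in the highest bits, then type, then field_index: one compare of the
  // packed value orders by all three keys at once.
  return ((uint32_t)it->rep << (2 + 16)) | ((uint32_t)it->type << 16) |
         it->field_index;
}

static int CompareItems(const void* a, const void* b) {
  uint32_t pa = Pack(a);
  uint32_t pb = Pack(b);
  return pa < pb ? -1 : (pa == pb ? 0 : 1);
}

int main(void) {
  Item items[] = {{4, 2, 7}, {0, 0, 3}, {0, 2, 1}};
  qsort(items, 3, sizeof(items[0]), CompareItems);
  for (int i = 0; i < 3; i++) {
    printf("rep=%u type=%u idx=%u\n", (unsigned)items[i].rep,
           (unsigned)items[i].type, (unsigned)items[i].field_index);
  }
  return 0;  // Prints (0,0,3), (0,2,1), (4,2,7): smallest rep first.
}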
@@ -656,7 +661,8 @@ static bool upb_MtDecoder_SortLayoutItems(upb_MtDecoder* d) {
     upb_MiniTable_Field* f = &d->fields[i];
     if (f->offset >= kOneofBase) continue;
     upb_LayoutItem item = {.field_index = i,
-                           .rep = f->mode >> kUpb_FieldRep_Shift};
+                           .rep = f->mode >> kUpb_FieldRep_Shift,
+                           .type = kUpb_LayoutItemType_Field};
     upb_MtDecoder_PushItem(d, item);
   }
@@ -756,6 +762,7 @@ static bool upb_MtDecoder_AssignOffsets(upb_MtDecoder* d) {
     while (true) {
       f->presence = ~item->offset;
       if (f->offset == kUpb_LayoutItem_IndexSentinel) break;
+      UPB_ASSERT(f->offset - kOneofBase < d->table->field_count);
       f = &d->fields[f->offset - kOneofBase];
     }
   }
@@ -903,8 +910,7 @@ upb_MiniTable_Extension* upb_MiniTable_BuildExtensions(const char* data,
   exts = upb_Arena_Malloc(arena, len);
   upb_MtDecoder_CheckOutOfMemory(&decoder, exts);
   upb_MtDecoder_Parse(&decoder, data, len, exts, sizeof(*exts), &count, NULL);
-  exts = upb_Arena_Realloc(arena, exts, sizeof(*exts) * len,
-                           sizeof(*exts) * count);
+  upb_Arena_ShrinkLast(arena, exts, sizeof(*exts) * len, sizeof(*exts) * count);

 done:
   *ext_count = count;

--- a/upb/mini_table.hpp
+++ b/upb/mini_table.hpp
@@ -76,7 +76,9 @@ class MtDataEncoder {
   bool operator()(T&& func) {
     char* end = func(buf_);
     if (!end) return false;
-    str_.reserve(_upb_Log2Ceiling(str_.size() + (end - buf_)));
+    // C++ does not guarantee that string has doubling growth behavior, but
+    // we need it to avoid O(n^2).
+    str_.reserve(_upb_Log2CeilingSize(str_.size() + (end - buf_)));
     str_.append(buf_, end - buf_);
     return true;
   }

--- a/upb/msg.c
+++ b/upb/msg.c
@@ -55,7 +55,7 @@ static bool realloc_internal(upb_Message* msg, size_t need, upb_Arena* arena) {
   upb_Message_Internal* in = upb_Message_Getinternal(msg);
   if (!in->internal) {
     /* No internal data, allocate from scratch. */
-    size_t size = UPB_MAX(128, _upb_Log2Ceilingsize(need + overhead));
+    size_t size = UPB_MAX(128, _upb_Log2CeilingSize(need + overhead));
     upb_Message_InternalData* internal = upb_Arena_Malloc(arena, size);
     if (!internal) return false;
     internal->size = size;
@@ -64,7 +64,7 @@ static bool realloc_internal(upb_Message* msg, size_t need, upb_Arena* arena) {
     in->internal = internal;
   } else if (in->internal->ext_begin - in->internal->unknown_end < need) {
     /* Internal data is too small, reallocate. */
-    size_t new_size = _upb_Log2Ceilingsize(in->internal->size + need);
+    size_t new_size = _upb_Log2CeilingSize(in->internal->size + need);
     size_t ext_bytes = in->internal->size - in->internal->ext_begin;
     size_t new_ext_begin = new_size - ext_bytes;
     upb_Message_InternalData* internal =
@@ -310,7 +310,7 @@ bool _upb_mapsorter_pushmap(_upb_mapsorter* s, upb_FieldType key_type,
   /* Grow s->entries if necessary. */
   if (sorted->end > s->cap) {
-    s->cap = _upb_Log2Ceilingsize(sorted->end);
+    s->cap = _upb_Log2CeilingSize(sorted->end);
     s->entries = realloc(s->entries, s->cap * sizeof(*s->entries));
     if (!s->entries) return false;
   }

--- a/upb/msg_internal.h
+++ b/upb/msg_internal.h
@@ -92,13 +92,16 @@ typedef enum {
   kUpb_LabelFlags_IsExtension = 8,
 } upb_LabelFlags;

+// Note: we sort by this number when calculating layout order.
 typedef enum {
   kUpb_FieldRep_1Byte = 0,
   kUpb_FieldRep_4Byte = 1,
   kUpb_FieldRep_StringView = 2,
   kUpb_FieldRep_Pointer = 3,
   kUpb_FieldRep_8Byte = 4,
+
   kUpb_FieldRep_Shift = 5,  // Bit offset of the rep in upb_MiniTable_Field.mode
+  kUpb_FieldRep_Max = kUpb_FieldRep_8Byte,
 } upb_FieldRep;

 UPB_INLINE upb_FieldMode upb_FieldMode_Get(const upb_MiniTable_Field* field) {
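For context on kUpb_FieldRep_Shift: the rep occupies the upper bits of a field's mode, which is why the layout code above reads it as f->mode >> kUpb_FieldRep_Shift. A minimal sketch of that packing, assuming an 8-bit mode with the rep OR'd into the high bits (the exact encoding is not shown in this hunk; these helpers are hypothetical, not upb API):

#include <stdint.h>

static uint8_t MakeMode(uint8_t field_mode, uint8_t rep) {
  return (uint8_t)(field_mode | (rep << 5));  // 5 == kUpb_FieldRep_Shift
}

static uint8_t GetRep(uint8_t mode) {
  return (uint8_t)(mode >> 5);  // same as f->mode >> kUpb_FieldRep_Shift
}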

--- a/upb/upb.h
+++ b/upb/upb.h
@@ -223,8 +223,32 @@ UPB_INLINE void* upb_Arena_Malloc(upb_Arena* a, size_t size) {
   return ret;
 }

+// Call to shrink the last alloc from this arena.
+// REQUIRES: (ptr, oldsize) was the last malloc/realloc from this arena.
+// We could also add a upb_Arena_TryShrinkLast() which is simply a no-op if
+// this was not the last alloc.
+UPB_INLINE void upb_Arena_ShrinkLast(upb_Arena* a, void* ptr, size_t oldsize,
+                                     size_t size) {
+  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
+  oldsize = UPB_ALIGN_MALLOC(oldsize);
+  size = UPB_ALIGN_MALLOC(size);
+  UPB_ASSERT((char*)ptr + oldsize == h->ptr);  // Must be the last alloc.
+  UPB_ASSERT(size <= oldsize);
+  h->ptr = (char*)ptr + size;
+}
+
 UPB_INLINE void* upb_Arena_Realloc(upb_Arena* a, void* ptr, size_t oldsize,
                                    size_t size) {
+  _upb_ArenaHead* h = (_upb_ArenaHead*)a;
+  oldsize = UPB_ALIGN_MALLOC(oldsize);
+  size = UPB_ALIGN_MALLOC(size);
+  if (size <= oldsize) {
+    if ((char*)ptr + oldsize == h->ptr) {
+      upb_Arena_ShrinkLast(a, ptr, oldsize, size);
+    }
+    return ptr;
+  }
+
   void* ret = upb_Arena_Malloc(a, size);
   if (ret && oldsize > 0) {
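The new upb_Arena_ShrinkLast() exists to support the pattern used by the mini_table.c hunks above: allocate for an upper bound, fill in the actual entries, then return the unused tail to the arena. A usage sketch under that assumption (Entry and ParseEntries are hypothetical, not upb API):

#include <stddef.h>
#include "upb/upb.h"

typedef struct { char value; } Entry;

// Parses at most `len` entries into an arena-owned array, then gives the
// unused tail of the allocation back to the arena.
static Entry* ParseEntries(upb_Arena* arena, const char* data, size_t len,
                           size_t* out_count) {
  Entry* entries = upb_Arena_Malloc(arena, sizeof(*entries) * len);
  if (!entries) return NULL;

  size_t count = 0;
  for (size_t i = 0; i < len; i++) {
    if (data[i] != ' ') entries[count++].value = data[i];  // keep non-spaces
  }

  // Valid only because `entries` is still the most recent allocation from
  // this arena, which is exactly what upb_Arena_ShrinkLast() asserts.
  upb_Arena_ShrinkLast(arena, entries, sizeof(*entries) * len,
                       sizeof(*entries) * count);
  *out_count = count;
  return entries;
}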
@@ -332,7 +356,7 @@ UPB_INLINE int _upb_Log2Ceiling(int x) {
 #endif
 }

-UPB_INLINE int _upb_Log2Ceilingsize(int x) { return 1 << _upb_Log2Ceiling(x); }
+UPB_INLINE int _upb_Log2CeilingSize(int x) { return 1 << _upb_Log2Ceiling(x); }

 #include "upb/port_undef.inc"
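The renamed _upb_Log2CeilingSize() rounds its argument up to the next power of two, which is what gives the reserve()/realloc() call sites in mini_table.hpp and msg.c their amortized doubling growth. A small sanity check, assuming _upb_Log2Ceiling(x) returns the ceiling of log2(x):

#include <assert.h>
#include "upb/upb.h"

int main(void) {
  assert(_upb_Log2CeilingSize(5) == 8);     // rounds up to the next power of two
  assert(_upb_Log2CeilingSize(8) == 8);     // exact powers of two are unchanged
  assert(_upb_Log2CeilingSize(9) == 16);
  assert(_upb_Log2CeilingSize(100) == 128);
  return 0;
}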
