diff --git a/src/upb_context.c b/src/upb_context.c
index 1a36d6e4ef..bf0e6df789 100644
--- a/src/upb_context.c
+++ b/src/upb_context.c
@@ -190,7 +190,7 @@ static bool insert_message(struct upb_strtable *t,
   e.e.key = fqname;
   e.type = UPB_SYM_MESSAGE;
   e.ref.msg = malloc(sizeof(*e.ref.msg));
-  if(!upb_msg_init(e.ref.msg, d, sort)) {
+  if(!upb_msg_init(e.ref.msg, d, fqname, sort)) {
     free(fqname.ptr);
     return false;
   }
diff --git a/src/upb_msg.c b/src/upb_msg.c
index e7158f9343..d44649edd5 100644
--- a/src/upb_msg.c
+++ b/src/upb_msg.c
@@ -35,7 +35,7 @@ static int compare_fields(const void *e1, const void *e2) {
 }
 
 bool upb_msg_init(struct upb_msg *m, struct google_protobuf_DescriptorProto *d,
-                  bool sort)
+                  struct upb_string fqname, bool sort)
 {
   /* TODO: more complete validation.
    * TODO: re-enable this check when we properly set this flag. */
@@ -47,6 +47,7 @@ bool upb_msg_init(struct upb_msg *m, struct google_protobuf_DescriptorProto *d,
                     sizeof(struct upb_fieldsbyname_entry));
 
   m->descriptor = d;
+  m->fqname = fqname;
   m->num_fields = d->field->len;
   m->set_flags_bytes = div_round_up(m->num_fields, 8);
   /* These are incremented in the loop. */
@@ -105,7 +106,7 @@ void upb_msg_ref(struct upb_msg *m, struct upb_msg_field *f,
                  union upb_symbol_ref ref) {
   struct google_protobuf_FieldDescriptorProto *d =
       upb_msg_field_descriptor(f, m);
-  struct upb_fieldsbynum_entry *int_e = upb_inttable_lookup(
+  struct upb_fieldsbynum_entry *int_e = upb_inttable_fast_lookup(
       &m->fields_by_num, d->number, sizeof(struct upb_fieldsbynum_entry));
   struct upb_fieldsbyname_entry *str_e =
       upb_strtable_lookup(&m->fields_by_name, d->name);
diff --git a/src/upb_msg.h b/src/upb_msg.h
index 9eaf6c2cfb..6be9405956 100644
--- a/src/upb_msg.h
+++ b/src/upb_msg.h
@@ -164,11 +164,15 @@ INLINE struct google_protobuf_FieldDescriptorProto *upb_msg_field_descriptor(
  * the caller should do that post-initialization by calling upb_msg_ref()
  * below.
  *
+ * fqname indicates the fully-qualified name of this message.  Ownership of
+ * fqname passes to the msg, but the msg stores only references to it (not a
+ * copy), so the underlying string must remain valid for the life of the msg.
+ *
  * sort indicates whether or not it is safe to reorder the fields from the order
  * they appear in d.  This should be false if code has been compiled against a
  * header for this type that expects the given order. */
 bool upb_msg_init(struct upb_msg *m, struct google_protobuf_DescriptorProto *d,
-                  bool sort);
+                  struct upb_string fqname, bool sort);
 void upb_msg_free(struct upb_msg *m);
 
 /* Clients use this function on a previously initialized upb_msg to resolve the
@@ -182,7 +186,7 @@ void upb_msg_ref(struct upb_msg *m, struct upb_msg_field *f, union upb_symbol_re
 INLINE struct upb_msg_field *upb_msg_fieldbynum(struct upb_msg *m,
                                                 uint32_t number) {
   struct upb_fieldsbynum_entry *e =
-      (struct upb_fieldsbynum_entry*)upb_inttable_lookup(
+      (struct upb_fieldsbynum_entry*)upb_inttable_fast_lookup(
           &m->fields_by_num, number, sizeof(struct upb_fieldsbynum_entry));
   return e ? &e->f : NULL;
 }
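
A caller sketch, not part of this patch, illustrating the new upb_msg_init()
contract.  It is modeled on insert_message() in upb_context.c above; the
helper name build_msg() and the way the descriptor d and the name fqname are
obtained are assumptions for illustration only, while the upb_msg_init() call
and the free(fqname.ptr) cleanup on failure mirror the code in this diff.

#include <stdbool.h>
#include <stdlib.h>
#include "upb_msg.h"   /* from src/, assuming src/ is on the include path */

static struct upb_msg *build_msg(struct google_protobuf_DescriptorProto *d,
                                 struct upb_string fqname) {
  struct upb_msg *m = malloc(sizeof(*m));
  if (!m) return NULL;
  /* sort=false: keep the fields in the order given by d, the safe choice when
   * generated headers may depend on that order. */
  if (!upb_msg_init(m, d, fqname, false)) {
    free(fqname.ptr);  /* Init failed: the name buffer is still ours to free. */
    free(m);
    return NULL;
  }
  /* On success m holds references to fqname's buffer, so the buffer must stay
   * alive until upb_msg_free(m). */
  return m;
}
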
diff --git a/src/upb_table.c b/src/upb_table.c
index 584bbd67df..063b0b627b 100644
--- a/src/upb_table.c
+++ b/src/upb_table.c
@@ -82,7 +82,7 @@ static uint32_t empty_intbucket(struct upb_inttable *table)
  * parameterize them. */
 static void intinsert(struct upb_inttable *t, struct upb_inttable_entry *e)
 {
-  assert(upb_inttable_lookup(t, e->key, t->t.entry_size) == NULL);
+  assert(upb_inttable_lookup(t, e->key) == NULL);
   t->t.count++;
   uint32_t bucket = upb_inttable_bucket(t, e->key);
   struct upb_inttable_entry *table_e = intent(t, bucket);
@@ -115,7 +115,7 @@ static void intinsert(struct upb_inttable *t, struct upb_inttable_entry *e)
   }
   memcpy(table_e, e, t->t.entry_size);
   table_e->next = UPB_END_OF_CHAIN;
-  assert(upb_inttable_lookup(t, e->key, t->t.entry_size) == table_e);
+  assert(upb_inttable_lookup(t, e->key) == table_e);
 }
 
 void upb_inttable_insert(struct upb_inttable *t, struct upb_inttable_entry *e)
diff --git a/src/upb_table.h b/src/upb_table.h
index 094ed488de..3855e3ef82 100644
--- a/src/upb_table.h
+++ b/src/upb_table.h
@@ -77,6 +77,14 @@ INLINE uint32_t upb_strtable_size(struct upb_strtable *t) {
   return upb_table_size(&t->t);
 }
 
+INLINE uint32_t upb_table_count(struct upb_table *t) { return t->count; }
+INLINE uint32_t upb_inttable_count(struct upb_inttable *t) {
+  return upb_table_count(&t->t);
+}
+INLINE uint32_t upb_strtable_count(struct upb_strtable *t) {
+  return upb_table_count(&t->t);
+}
+
 /* Inserts the given key into the hashtable with the given value.  The key must
  * not already exist in the hash table.  The data will be copied from e into
  * the hashtable (the amount of data copied comes from entry_size when the
@@ -93,8 +101,8 @@ INLINE uint32_t upb_inttable_bucket(struct upb_inttable *t, upb_inttable_key_t k
  * of parsing.  We have the caller specify the entry_size because fixing
  * this as a literal (instead of reading table->entry_size) gives the
  * compiler more ability to optimize. */
-INLINE void *upb_inttable_lookup(struct upb_inttable *t,
-                                 uint32_t key, uint32_t entry_size) {
+INLINE void *upb_inttable_fast_lookup(struct upb_inttable *t,
+                                      uint32_t key, uint32_t entry_size) {
   assert(key != 0);
   uint32_t bucket = upb_inttable_bucket(t, key);
   struct upb_inttable_entry *e;
@@ -105,6 +113,10 @@ INLINE void *upb_inttable_lookup(struct upb_inttable *t,
   return NULL;  /* Not found. */
 }
 
+INLINE void *upb_inttable_lookup(struct upb_inttable *t, uint32_t key) {
+  return upb_inttable_fast_lookup(t, key, t->t.entry_size);
+}
+
 void *upb_strtable_lookup(struct upb_strtable *t, struct upb_string *key);
 
 /* Provides iteration over the table.  The order in which the entries are
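
A hedged sketch, not part of this patch, of the two lookup styles that now
exist side by side, plus the new count accessor.  struct my_entry and
lookup_value() are made up for illustration; the assumption that an entry
embeds struct upb_inttable_entry as its first member follows the pattern of
the fields_by_num entries used in upb_msg.c.

#include <assert.h>
#include "upb_table.h"   /* from src/, assuming src/ is on the include path */

struct my_entry {
  struct upb_inttable_entry e;  /* key and chaining, managed by the table. */
  int value;
};

static int lookup_value(struct upb_inttable *t, uint32_t key) {
  /* Keys must be nonzero (upb_inttable_fast_lookup asserts this). */
  if (key == 0 || upb_inttable_count(t) == 0) return -1;

  /* Hot path: pass the entry size as a compile-time literal so the compiler
   * can optimize, as the comment above recommends for parsing. */
  struct my_entry *fast = (struct my_entry*)upb_inttable_fast_lookup(
      t, key, sizeof(struct my_entry));

  /* Convenience path: the new wrapper reads entry_size from the table. */
  struct my_entry *slow = (struct my_entry*)upb_inttable_lookup(t, key);

  assert(fast == slow);
  (void)slow;
  return fast ? fast->value : -1;  /* -1 is an arbitrary "not found" value. */
}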