[PairPos] Use a class cache

10% speedup on Roboto-Regular. 5% on SF Pro.
Branch: pull/5043/head
Author: Behdad Esfahbod
Parent: acd122d030
Commit: 726af2e4e5

Changed files:
  src/OT/Layout/GPOS/PairPosFormat2.hh  (44 lines changed)
  src/hb-ot-layout-gsubgpos.hh          (60 lines changed)

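Background for the diff below: PairPosFormat2 kerning classifies both glyphs of every candidate pair through ClassDef::get_class (), and on class-heavy fonts those two lookups dominate the subtable's cost. This commit attaches a pair of class caches to the lookup accelerator, one per ClassDef, so repeat lookups of the same glyph are answered from the cache. hb_ot_class_cache_t itself is defined elsewhere in HarfBuzz and is not part of this diff; the following is only a minimal single-slot sketch of the idea, with invented names:

#include <cstdint>
#include <cstdio>

// Invented single-slot stand-in for hb_ot_class_cache_t (which this
// diff uses but does not define).
struct class_cache_t
{
  void clear () { cached_glyph = (uint32_t) -1; }

  // On a hit, writes the remembered class to *klass and returns true.
  bool get (uint32_t glyph, unsigned *klass) const
  {
    if (glyph != cached_glyph) return false;
    *klass = cached_class;
    return true;
  }

  void set (uint32_t glyph, unsigned klass)
  { cached_glyph = glyph; cached_class = klass; }

  uint32_t cached_glyph = (uint32_t) -1;
  unsigned cached_class = 0;
};

// Hypothetical stand-in for the real ClassDef::get_class () table walk.
static unsigned slow_get_class (uint32_t glyph) { return glyph % 4; }

// Mirrors the pattern _apply () relies on: try the cache, fall back to
// the slow lookup on a miss, remember the answer for next time.
static unsigned get_class (uint32_t glyph, class_cache_t *cache)
{
  unsigned klass;
  if (cache && cache->get (glyph, &klass)) return klass;
  klass = slow_get_class (glyph);
  if (cache) cache->set (glyph, klass);
  return klass;
}

int main ()
{
  class_cache_t cache;  // pair_pos_cache_t holds two of these, one per ClassDef
  cache.clear ();
  printf ("%u\n", get_class (65, &cache));  // miss: slow path, then cached
  printf ("%u\n", get_class (65, &cache));  // hit: served from the slot
  return 0;
}

The real cache may well hold more than one slot; the point is only the hit/miss shape that get_class () is handed.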
src/OT/Layout/GPOS/PairPosFormat2.hh

@@ -123,7 +123,43 @@ struct PairPosFormat2_4 : ValueBase
   const Coverage &get_coverage () const { return this+coverage; }

-  bool apply (hb_ot_apply_context_t *c) const
+  using pair_pos_cache_t = hb_pair_t<hb_ot_class_cache_t, hb_ot_class_cache_t>;
+
+  unsigned cache_cost () const
+  {
+    unsigned c = (this+classDef1).cost () + (this+classDef2).cost ();
+    return c >= 4 ? c : 0;
+  }
+  static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
+  {
+    switch (op)
+    {
+      case hb_ot_lookup_cache_op_t::CREATE:
+      {
+        pair_pos_cache_t *cache = (pair_pos_cache_t *) hb_malloc (sizeof (pair_pos_cache_t));
+        if (likely (cache))
+        {
+          cache->first.clear ();
+          cache->second.clear ();
+        }
+        return cache;
+      }
+      case hb_ot_lookup_cache_op_t::ENTER:
+        return (void *) true;
+      case hb_ot_lookup_cache_op_t::LEAVE:
+        return nullptr;
+      case hb_ot_lookup_cache_op_t::DESTROY:
+      {
+        pair_pos_cache_t *cache = (pair_pos_cache_t *) p;
+        hb_free (cache);
+        return nullptr;
+      }
+    }
+  }
+
+  bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
+  bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
+  bool _apply (hb_ot_apply_context_t *c, bool cached) const
   {
     TRACE_APPLY (this);
     hb_buffer_t *buffer = c->buffer;

@@ -139,8 +175,14 @@ struct PairPosFormat2_4 : ValueBase
       return_trace (false);
     }

+#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
+    pair_pos_cache_t *cache = cached ? (pair_pos_cache_t *) c->lookup_accel->cache : nullptr;
+    unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint, cache ? &cache->first : nullptr);
+    unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint, cache ? &cache->second : nullptr);
+#else
     unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint);
     unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint);
+#endif
     if (unlikely (klass1 >= class1Count || klass2 >= class2Count))
     {
       buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1);

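All cache users now funnel through one entry point keyed by hb_ot_lookup_cache_op_t. The contract as wired up in this commit: CREATE runs once when the lookup accelerator is built (with p == nullptr) and returns the cache, or nullptr to fall back to the uncached path; ENTER and LEAVE bracket each apply pass, with p being the apply context for the context lookups below; DESTROY receives whatever CREATE returned. A self-contained toy version of that state machine (names invented, not HarfBuzz API):

#include <cstdlib>

// Invented mirror of hb_ot_lookup_cache_op_t and a conforming cache_func.
enum class cache_op_t { CREATE, ENTER, LEAVE, DESTROY };

struct toy_cache_t { unsigned cached_glyph = (unsigned) -1; };

static void * cache_func (void *p, cache_op_t op)
{
  switch (op)
  {
    case cache_op_t::CREATE:
      // p is unused; return the allocation, or nullptr so the caller
      // falls back to the uncached apply path.
      return calloc (1, sizeof (toy_cache_t));
    case cache_op_t::ENTER:
      // Returning (void *) true opts this apply pass into caching.
      return (void *) true;
    case cache_op_t::LEAVE:
      return nullptr;
    case cache_op_t::DESTROY:
      // p is whatever CREATE returned.
      free (p);
      return nullptr;
  }
  return nullptr;
}

int main ()
{
  void *cache = cache_func (nullptr, cache_op_t::CREATE);  // accelerator build
  if (cache && cache_func (nullptr, cache_op_t::ENTER))    // before the apply loop
  {
    /* apply_cached () calls would consult the cache here */
    cache_func (nullptr, cache_op_t::LEAVE);               // after the apply loop
  }
  cache_func (cache, cache_op_t::DESTROY);                 // accelerator fini
  return 0;
}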
src/hb-ot-layout-gsubgpos.hh

@@ -713,6 +713,7 @@ struct hb_ot_apply_context_t :
   recurse_func_t recurse_func = nullptr;
   const GDEF &gdef;
   const GDEF::accelerator_t &gdef_accel;
+  const hb_ot_layout_lookup_accelerator_t *lookup_accel = nullptr;
   const ItemVariationStore &var_store;
   ItemVariationStore::cache_t *var_store_cache;
   hb_set_digest_t digest;

@@ -930,27 +931,23 @@ struct hb_accelerate_subtables_context_t :
   }

   template <typename T>
-  static inline auto cache_func_ (const T *obj,
-                                  hb_ot_apply_context_t *c,
+  static inline auto cache_func_ (void *p,
                                   hb_ot_lookup_cache_op_t op,
-                                  hb_priority<1>) HB_RETURN (void *, obj->cache_func (c, op) )
-  template <typename T>
-  static inline void * cache_func_ (const T *obj,
-                                    hb_ot_apply_context_t *c,
+                                  hb_priority<1>) HB_RETURN (void *, T::cache_func (p, op) )
+  template <typename T=void>
+  static inline void * cache_func_ (void *p,
                                     hb_ot_lookup_cache_op_t op HB_UNUSED,
                                     hb_priority<0>) { return (void *) false; }

   template <typename Type>
-  static inline void * cache_func_to (const void *obj,
-                                      hb_ot_apply_context_t *c,
+  static inline void * cache_func_to (void *p,
                                       hb_ot_lookup_cache_op_t op)
   {
-    const Type *typed_obj = (const Type *) obj;
-    return cache_func_ (typed_obj, c, op, hb_prioritize);
+    return cache_func_<Type> (p, op, hb_prioritize);
   }
 #endif

   typedef bool (*hb_apply_func_t) (const void *obj, hb_ot_apply_context_t *c);
-  typedef void * (*hb_cache_func_t) (const void *obj, hb_ot_apply_context_t *c, hb_ot_lookup_cache_op_t op);
+  typedef void * (*hb_cache_func_t) (void *p, hb_ot_lookup_cache_op_t op);

 struct hb_applicable_t
 {

@@ -987,11 +984,11 @@ struct hb_accelerate_subtables_context_t :
   }
   bool cache_enter (hb_ot_apply_context_t *c) const
   {
-    return (bool) cache_func (obj, c, hb_ot_lookup_cache_op_t::ENTER);
+    return (bool) cache_func (c, hb_ot_lookup_cache_op_t::ENTER);
   }
   void cache_leave (hb_ot_apply_context_t *c) const
   {
-    cache_func (obj, c, hb_ot_lookup_cache_op_t::LEAVE);
+    cache_func (c, hb_ot_lookup_cache_op_t::LEAVE);
   }
 #endif
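The cache_func_ overload pair above is HarfBuzz's priority-tag dispatch: the hb_priority<1> overload survives overload resolution only for types T that actually declare a cache_func (HB_RETURN is the SFINAE-gated return macro), and everything else falls through to the hb_priority<0> stub returning false. With the object pointer gone from the signature, the dispatch now goes through T's static member, hence T::cache_func and the T=void default. A compilable illustration of the pattern with toy types (hb_priority and hb_prioritize are the real utilities; everything else here is invented):

#include <cstdio>

// Minimal re-creation of hb_priority<> tag dispatch as used above.
template <unsigned Pri> struct priority : priority<Pri - 1> {};
template <> struct priority<0> {};

struct with_cache    { static void * cache_func (void *, int) { return (void *) true; } };
struct without_cache {};

// Viable only when T::cache_func exists; the decltype return type is the
// SFINAE gate, playing the role of HB_RETURN in the real code.
template <typename T>
static auto cache_func_ (void *p, int op, priority<1>)
  -> decltype (T::cache_func (p, op))
{ return T::cache_func (p, op); }

// Fallback selected for types with no cache support.
template <typename T = void>
static void * cache_func_ (void *, int, priority<0>) { return nullptr; }

template <typename T>
static void * cache_func_to (void *p, int op)
{ return cache_func_<T> (p, op, priority<1> {}); }

int main ()
{
  printf ("%p\n", cache_func_to<with_cache> (nullptr, 0));    // T::cache_func runs
  printf ("%p\n", cache_func_to<without_cache> (nullptr, 0)); // fallback: null
  return 0;
}

The signature change is what lets CREATE and DESTROY be invoked with no apply context or subtable object in hand, at accelerator build and teardown time.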
@@ -2638,7 +2635,7 @@ struct ContextFormat2_5
     unsigned c = (this+classDef).cost () * ruleSet.len;
     return c >= 4 ? c : 0;
   }
-  void * cache_func (hb_ot_apply_context_t *c, hb_ot_lookup_cache_op_t op) const
+  static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
   {
     switch (op)
     {

@@ -2646,6 +2643,7 @@ struct ContextFormat2_5
         return (void *) true;
       case hb_ot_lookup_cache_op_t::ENTER:
       {
+        hb_ot_apply_context_t *c = (hb_ot_apply_context_t *) p;
         if (!HB_BUFFER_TRY_ALLOCATE_VAR (c->buffer, syllable))
           return (void *) false;
         auto &info = c->buffer->info;

@@ -2657,9 +2655,10 @@ struct ContextFormat2_5
       }
       case hb_ot_lookup_cache_op_t::LEAVE:
       {
+        hb_ot_apply_context_t *c = (hb_ot_apply_context_t *) p;
         c->new_syllables = (unsigned) -1;
         HB_BUFFER_DEALLOCATE_VAR (c->buffer, syllable);
-        return (void *) true;
+        return nullptr;
       }
       case hb_ot_lookup_cache_op_t::DESTROY:
         return nullptr;

@@ -3886,7 +3885,7 @@ struct ChainContextFormat2_5
     unsigned c = (this+lookaheadClassDef).cost () * ruleSet.len;
     return c >= 4 ? c : 0;
   }
-  void * cache_func (hb_ot_apply_context_t *c, hb_ot_lookup_cache_op_t op) const
+  static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
   {
     switch (op)
     {

@@ -3894,6 +3893,7 @@ struct ChainContextFormat2_5
         return (void *) true;
       case hb_ot_lookup_cache_op_t::ENTER:
       {
+        hb_ot_apply_context_t *c = (hb_ot_apply_context_t *) p;
         if (!HB_BUFFER_TRY_ALLOCATE_VAR (c->buffer, syllable))
           return (void *) false;
         auto &info = c->buffer->info;

@@ -3905,9 +3905,10 @@ struct ChainContextFormat2_5
       }
       case hb_ot_lookup_cache_op_t::LEAVE:
       {
+        hb_ot_apply_context_t *c = (hb_ot_apply_context_t *) p;
         c->new_syllables = (unsigned) -1;
         HB_BUFFER_DEALLOCATE_VAR (c->buffer, syllable);
-        return (void *) true;
+        return nullptr;
       }
       case hb_ot_lookup_cache_op_t::DESTROY:
         return nullptr;

@@ -4438,6 +4439,14 @@ struct hb_ot_layout_lookup_accelerator_t
 #ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
     thiz->cache_user_idx = c_accelerate_subtables.cache_user_idx;
+    if (thiz->cache_user_idx != (unsigned) -1)
+    {
+      thiz->cache = thiz->subtables[thiz->cache_user_idx].cache_func (nullptr, hb_ot_lookup_cache_op_t::CREATE);
+      if (!thiz->cache)
+        thiz->cache_user_idx = (unsigned) -1;
+    }
     for (unsigned i = 0; i < count; i++)
       if (i != thiz->cache_user_idx)
         thiz->subtables[i].apply_cached_func = thiz->subtables[i].apply_func;

@@ -4446,6 +4455,17 @@ struct hb_ot_layout_lookup_accelerator_t
     return thiz;
   }

+  void fini ()
+  {
+#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
+    if (cache)
+    {
+      assert (cache_user_idx != (unsigned) -1);
+      subtables[cache_user_idx].cache_func (cache, hb_ot_lookup_cache_op_t::DESTROY);
+    }
+#endif
+  }
+
   bool may_have (hb_codepoint_t g) const
   { return digest.may_have (g); }

@@ -4454,6 +4474,7 @@ struct hb_ot_layout_lookup_accelerator_t
 #endif
   bool apply (hb_ot_apply_context_t *c, unsigned subtables_count, bool use_cache) const
   {
+    c->lookup_accel = this;
 #ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
     if (use_cache)
     {

@@ -4493,6 +4514,7 @@ struct hb_ot_layout_lookup_accelerator_t
   hb_set_digest_t digest;
+  void *cache = nullptr;
   private:
 #ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
   unsigned cache_user_idx = (unsigned) -1;

@@ -4881,7 +4903,11 @@ struct GSUBGPOS
   ~accelerator_t ()
   {
     for (unsigned int i = 0; i < this->lookup_count; i++)
+    {
+      if (this->accels[i])
+        this->accels[i]->fini ();
       hb_free (this->accels[i]);
+    }
     hb_free (this->accels);
     this->table.destroy ();
   }

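Lifecycle note on the last hunks: previously the only cache users (the context lookups) kept their per-pass state in buffer variables, so CREATE returned a bare true and nothing outlived an apply pass; now that PairPos's CREATE heap-allocates, the accelerator owns the pointer (the new cache member), and the new fini (), invoked from ~accelerator_t (), releases it via DESTROY. Each lookup still has a single cache slot, gated by cache_cost (): costs under 4 are floored to 0, and the accelerator appears to hand the slot to the subtable reporting the highest nonzero cost. A sketch of that selection under that assumption (names invented; the real bookkeeping lives in hb_accelerate_subtables_context_t, which this diff only partially shows):

#include <cstdio>

// Invented stand-ins for the accelerator's bookkeeping.
struct subtable_info_t { const char *name; unsigned cache_cost; };

// One cache slot per lookup: the subtable with the highest nonzero
// cache_cost () claims it; (unsigned) -1 means "no cache user".
static unsigned pick_cache_user (const subtable_info_t *subtables, unsigned count)
{
  unsigned best_cost = 0, best_idx = (unsigned) -1;
  for (unsigned i = 0; i < count; i++)
    if (subtables[i].cache_cost > best_cost)
    {
      best_cost = subtables[i].cache_cost;
      best_idx = i;
    }
  return best_idx;
}

int main ()
{
  // cache_cost () already floors cheap subtables to 0 ("c >= 4 ? c : 0"),
  // so trivial ClassDefs never claim the slot.
  subtable_info_t subtables[] = { {"fmt1", 0}, {"fmt2", 17}, {"fmt2b", 9} };
  unsigned idx = pick_cache_user (subtables, 3);
  if (idx != (unsigned) -1)
    printf ("cache user: %s\n", subtables[idx].name);  // prints "fmt2"
  return 0;
}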