[atomic-int] Add operators for relaxed ops

Branch: pull/3777/head
Author: Behdad Esfahbod
Parent: 86d1e22d4f
Commit: f73c15ca6c
11 changed files (additions + deletions per file):

  src/hb-atomic.hh          |  3
  src/hb-bit-set.hh         | 22
  src/hb-cache.hh           |  6
  src/hb-common.cc          |  2
  src/hb-debug.hh           |  4
  src/hb-face.cc            |  6
  src/hb-face.hh            |  4
  src/hb-object.hh          | 16
  src/hb-ot-shaper-indic.cc |  8
  src/hb-ot-tag.cc          |  4
  src/hb-static.cc          |  4

src/hb-atomic.hh
@@ -159,6 +159,9 @@ struct hb_atomic_int_t
   hb_atomic_int_t () = default;
   constexpr hb_atomic_int_t (int v) : v (v) {}
 
+  hb_atomic_int_t& operator = (int v_) { set_relaxed (v_); return *this; }
+  operator int () const { return get_relaxed (); }
+
   void set_relaxed (int v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
   void set_release (int v_) { hb_atomic_int_impl_set (&v, v_); }
   int get_relaxed () const { return hb_atomic_int_impl_get_relaxed (&v); }
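These two added operators enable every call-site change below: assignment
becomes a relaxed store and implicit conversion to int becomes a relaxed
load. A minimal sketch of the resulting usage, approximated here with
std::atomic rather than HarfBuzz's hb_atomic_int_impl_* primitives:

  #include <atomic>

  // Stand-in for hb_atomic_int_t; only the two new operators are shown.
  struct atomic_int_like
  {
    // Assignment is a relaxed store...
    atomic_int_like& operator = (int v_)
    { v.store (v_, std::memory_order_relaxed); return *this; }

    // ...and conversion to int is a relaxed load.
    operator int () const
    { return v.load (std::memory_order_relaxed); }

    std::atomic<int> v {0};
  };

  int main ()
  {
    atomic_int_like n;
    n = 42;        // was: n.set_relaxed (42);
    int i = n;     // was: int i = n.get_relaxed ();
    return i == 42 ? 0 : 1;
  }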

src/hb-bit-set.hh
@@ -56,7 +56,7 @@ struct hb_bit_set_t
   {
     successful = true;
     population = 0;
-    last_page_lookup.set_relaxed (0);
+    last_page_lookup = 0;
     page_map.init ();
     pages.init ();
   }
@@ -614,7 +614,7 @@ struct hb_bit_set_t
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (*codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
@@ -632,7 +632,7 @@ struct hb_bit_set_t
       if (pages_array[current.index].next (codepoint))
       {
        *codepoint += current.major * page_t::PAGE_BITS;
-       last_page_lookup.set_relaxed (i);
+       last_page_lookup = i;
        return true;
       }
       i++;
@@ -645,11 +645,11 @@ struct hb_bit_set_t
       if (m != INVALID)
       {
        *codepoint = current.major * page_t::PAGE_BITS + m;
-       last_page_lookup.set_relaxed (i);
+       last_page_lookup = i;
        return true;
       }
     }
-    last_page_lookup.set_relaxed (0);
+    last_page_lookup = 0;
     *codepoint = INVALID;
     return false;
   }
@@ -732,7 +732,7 @@ struct hb_bit_set_t
   {
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
       page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -773,7 +773,7 @@ struct hb_bit_set_t
   {
     const auto* page_map_array = page_map.arrayZ;
     unsigned int major = get_major (codepoint);
-    unsigned int i = last_page_lookup.get_relaxed ();
+    unsigned int i = last_page_lookup;
     if (unlikely (i >= page_map.length || page_map_array[i].major != major))
     {
       page_map.bfind(major, &i, HB_NOT_FOUND_STORE_CLOSEST);
@@ -900,7 +900,7 @@ struct hb_bit_set_t
     /* The extra page_map length is necessary; can't just rely on vector here,
      * since the next check would be tricked because a null page also has
      * major==0, which we can't distinguish from an actualy major==0 page... */
-    unsigned i = last_page_lookup.get_relaxed ();
+    unsigned i = last_page_lookup;
     if (likely (i < page_map.length))
     {
       auto &cached_page = page_map.arrayZ[i];
@@ -924,7 +924,7 @@ struct hb_bit_set_t
       page_map[i] = map;
     }
-    last_page_lookup.set_relaxed (i);
+    last_page_lookup = i;
     return &pages[page_map[i].index];
   }
   const page_t *page_for (hb_codepoint_t g) const
@@ -934,7 +934,7 @@ struct hb_bit_set_t
     /* The extra page_map length is necessary; can't just rely on vector here,
      * since the next check would be tricked because a null page also has
      * major==0, which we can't distinguish from an actualy major==0 page... */
-    unsigned i = last_page_lookup.get_relaxed ();
+    unsigned i = last_page_lookup;
     if (likely (i < page_map.length))
     {
       auto &cached_page = page_map.arrayZ[i];
@@ -946,7 +946,7 @@ struct hb_bit_set_t
     if (!page_map.bfind (key, &i))
       return nullptr;
-    last_page_lookup.set_relaxed (i);
+    last_page_lookup = i;
    return &pages[page_map[i].index];
   }
   page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
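Every hunk above touches the same single-entry cache: last_page_lookup
remembers which page_map slot the previous query hit, and is read and
written with relaxed ordering because a stale value only costs a
re-search, never correctness. A self-contained sketch of that pattern
(names and the search helper are illustrative, not HarfBuzz API):

  #include <algorithm>
  #include <atomic>
  #include <vector>

  struct page_map_entry { unsigned major, index; };

  struct bit_set_like
  {
    std::vector<page_map_entry> page_map;  // sorted by major
    mutable std::atomic<unsigned> last_page_lookup {0};

    const page_map_entry *find_page (unsigned major) const
    {
      unsigned i = last_page_lookup.load (std::memory_order_relaxed);
      if (i >= page_map.size () || page_map[i].major != major)
      {
        // Cache miss: fall back to binary search over the sorted map.
        auto it = std::lower_bound (page_map.begin (), page_map.end (), major,
                                    [] (const page_map_entry &e, unsigned m)
                                    { return e.major < m; });
        if (it == page_map.end () || it->major != major)
          return nullptr;
        i = (unsigned) (it - page_map.begin ());
      }
      last_page_lookup.store (i, std::memory_order_relaxed);
      return &page_map[i];
    }
  };

  int main ()
  {
    bit_set_like s;
    s.page_map = {{0, 0}, {3, 1}, {7, 2}};
    return s.find_page (3) && !s.find_page (5) ? 0 : 1;
  }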

src/hb-cache.hh
@@ -45,13 +45,13 @@ struct hb_cache_t
   void clear ()
   {
     for (unsigned i = 0; i < ARRAY_LENGTH (values); i++)
-      values[i].set_relaxed (-1);
+      values[i] = -1;
   }
 
   bool get (unsigned int key, unsigned int *value) const
   {
     unsigned int k = key & ((1u<<cache_bits)-1);
-    unsigned int v = values[k].get_relaxed ();
+    unsigned int v = values[k];
     if ((key_bits + value_bits - cache_bits == 8 * sizeof (hb_atomic_int_t) && v == (unsigned int) -1) ||
         (v >> value_bits) != (key >> cache_bits))
       return false;
@@ -65,7 +65,7 @@ struct hb_cache_t
       return false; /* Overflows */
     unsigned int k = key & ((1u<<cache_bits)-1);
     unsigned int v = ((key>>cache_bits)<<value_bits) | value;
-    values[k].set_relaxed (v);
+    values[k] = v;
     return true;
   }
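hb_cache_t is a direct-mapped cache that packs a tag (the key's upper
bits) and the value into a single int, so one relaxed load suffices to
read an entry. An illustrative re-implementation with example bit widths
(16-bit keys, 8-bit values, 256 slots; not necessarily HarfBuzz's
template defaults):

  #include <cassert>
  #include <cstdint>

  constexpr unsigned key_bits = 16, value_bits = 8, cache_bits = 8;
  static uint32_t values[1u << cache_bits];

  static void cache_clear ()
  {
    for (auto &v : values) v = (uint32_t) -1;  // -1 marks an empty slot
  }

  static bool cache_set (unsigned key, unsigned value)
  {
    if ((key >> key_bits) || (value >> value_bits))
      return false;                            // overflows the packed fields
    unsigned k = key & ((1u << cache_bits) - 1);
    // The key's upper bits become the tag, stored above the value bits.
    values[k] = ((key >> cache_bits) << value_bits) | value;
    return true;
  }

  static bool cache_get (unsigned key, unsigned *value)
  {
    unsigned k = key & ((1u << cache_bits) - 1);
    uint32_t v = values[k];
    if (v == (uint32_t) -1 || (v >> value_bits) != (key >> cache_bits))
      return false;                            // empty slot or tag mismatch
    *value = v & ((1u << value_bits) - 1);
    return true;
  }

  int main ()
  {
    cache_clear ();
    unsigned v;
    cache_set (0x1234, 0x56);
    assert (cache_get (0x1234, &v) && v == 0x56);
    assert (!cache_get (0x5634, &v));          // same slot, different tag
    return 0;
  }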

src/hb-common.cc
@@ -99,7 +99,7 @@ _hb_options_init ()
   }
 
   /* This is idempotent and threadsafe. */
-  _hb_options.set_relaxed (u.i);
+  _hb_options = u.i;
 }

src/hb-debug.hh
@@ -67,12 +67,12 @@ hb_options ()
 #endif
 
   /* Make a local copy, so we can access bitfield threadsafely. */
   hb_options_union_t u;
-  u.i = _hb_options.get_relaxed ();
+  u.i = _hb_options;
   if (unlikely (!u.i))
   {
     _hb_options_init ();
-    u.i = _hb_options.get_relaxed ();
+    u.i = _hb_options;
   }
 
   return u.opts;
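The pattern here copies the whole option word out of the atomic once,
then reads individual bitfields from the local copy. A sketch of that
union trick, mirroring the two fields visible in this diff, with
std::atomic standing in for hb_atomic_int_t:

  #include <atomic>

  union options_union
  {
    int i;
    struct { unsigned initialized : 1, uniscribe_bug_compatible : 1; } opts;
  };

  static std::atomic<int> g_options {0};

  static void options_init ()
  {
    options_union u;
    u.i = 0;
    u.opts.initialized = 1;  // guarantees u.i != 0 afterwards
    // The real code parses the HB_OPTIONS environment variable here.
    g_options.store (u.i, std::memory_order_relaxed);  // idempotent, threadsafe
  }

  static options_union options ()
  {
    options_union u;
    u.i = g_options.load (std::memory_order_relaxed);  // one-shot local copy
    if (!u.i)
    {
      options_init ();
      u.i = g_options.load (std::memory_order_relaxed);
    }
    return u;
  }

  int main ()
  {
    return options ().opts.uniscribe_bug_compatible;  // forces one lazy init
  }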

src/hb-face.cc
@@ -132,7 +132,7 @@ hb_face_create_for_tables (hb_reference_table_func_t reference_table_func,
   face->user_data = user_data;
   face->destroy = destroy;
-  face->num_glyphs.set_relaxed (-1);
+  face->num_glyphs = -1;
 
   face->data.init0 (face);
   face->table.init0 (face);
@@ -479,7 +479,7 @@ hb_face_set_upem (hb_face_t *face,
   if (hb_object_is_immutable (face))
     return;
 
-  face->upem.set_relaxed (upem);
+  face->upem = upem;
 }
 
 /**
@@ -514,7 +514,7 @@ hb_face_set_glyph_count (hb_face_t *face,
   if (hb_object_is_immutable (face))
     return;
 
-  face->num_glyphs.set_relaxed (glyph_count);
+  face->num_glyphs = glyph_count;
 }
 
 /**

src/hb-face.hh
@@ -83,7 +83,7 @@ struct hb_face_t
   unsigned int get_upem () const
   {
-    unsigned int ret = upem.get_relaxed ();
+    unsigned int ret = upem;
     if (unlikely (!ret))
     {
       return load_upem ();
@@ -93,7 +93,7 @@ struct hb_face_t
   unsigned int get_num_glyphs () const
   {
-    unsigned int ret = num_glyphs.get_relaxed ();
+    unsigned int ret = num_glyphs;
     if (unlikely (ret == UINT_MAX))
       return load_num_glyphs ();
     return ret;
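Both getters use the same lazy-load idiom: a sentinel (0 for upem,
UINT_MAX for num_glyphs) marks "not loaded yet", and the load is a benign
race since every thread computes the same result. A sketch under that
assumption, where the loader body is a stand-in for the real table
parsing:

  #include <atomic>
  #include <climits>

  struct face_like
  {
    mutable std::atomic<unsigned> num_glyphs {UINT_MAX};  // UINT_MAX = unloaded

    unsigned get_num_glyphs () const
    {
      unsigned ret = num_glyphs.load (std::memory_order_relaxed);
      if (ret == UINT_MAX)
        return load_num_glyphs ();
      return ret;
    }

    unsigned load_num_glyphs () const
    {
      unsigned ret = 1234;  // stand-in for reading maxp/loca
      // Concurrent callers all store the same value, so relaxed is enough.
      num_glyphs.store (ret, std::memory_order_relaxed);
      return ret;
    }
  };

  int main ()
  {
    face_like f;
    return f.get_num_glyphs () == f.get_num_glyphs () ? 0 : 1;  // 2nd call hits cache
  }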

src/hb-object.hh
@@ -144,14 +144,14 @@ struct hb_reference_count_t
 {
   mutable hb_atomic_int_t ref_count;
 
-  void init (int v = 1) { ref_count.set_relaxed (v); }
-  int get_relaxed () const { return ref_count.get_relaxed (); }
+  void init (int v = 1) { ref_count = v; }
+  int get_relaxed () const { return ref_count; }
   int inc () const { return ref_count.inc (); }
   int dec () const { return ref_count.dec (); }
-  void fini () { ref_count.set_relaxed (-0x0000DEAD); }
+  void fini () { ref_count = -0x0000DEAD; }
 
-  bool is_inert () const { return !ref_count.get_relaxed (); }
-  bool is_valid () const { return ref_count.get_relaxed () > 0; }
+  bool is_inert () const { return !ref_count; }
+  bool is_valid () const { return ref_count > 0; }
 };
@@ -233,7 +233,7 @@ template <typename Type>
 static inline void hb_object_init (Type *obj)
 {
   obj->header.ref_count.init ();
-  obj->header.writable.set_relaxed (true);
+  obj->header.writable = true;
   obj->header.user_data.init ();
 }
 template <typename Type>
@@ -244,12 +244,12 @@ static inline bool hb_object_is_valid (const Type *obj)
 template <typename Type>
 static inline bool hb_object_is_immutable (const Type *obj)
 {
-  return !obj->header.writable.get_relaxed ();
+  return !obj->header.writable;
 }
 template <typename Type>
 static inline void hb_object_make_immutable (const Type *obj)
 {
-  obj->header.writable.set_relaxed (false);
+  obj->header.writable = false;
 }
 template <typename Type>
 static inline Type *hb_object_reference (Type *obj)
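The reference-count lifecycle implied by hb_reference_count_t: a
zero-initialized count marks an inert (static nil) object that is never
destroyed, and fini () poisons the count so a use-after-free shows up as
is_valid () == false. A sketch with std::atomic standing in for
hb_atomic_int_t (inc/dec return the old value, matching fetch_add):

  #include <atomic>
  #include <cassert>

  struct refcount_like
  {
    mutable std::atomic<int> ref_count {0};  // 0 = inert

    void init (int v = 1) { ref_count.store (v, std::memory_order_relaxed); }
    int  inc () const { return ref_count.fetch_add (+1); }
    int  dec () const { return ref_count.fetch_add (-1); }
    void fini () { ref_count.store (-0x0000DEAD, std::memory_order_relaxed); }

    bool is_inert () const { return !ref_count.load (std::memory_order_relaxed); }
    bool is_valid () const { return ref_count.load (std::memory_order_relaxed) > 0; }
  };

  int main ()
  {
    refcount_like rc;
    assert (rc.is_inert ());  // zero-initialized: treated as immortal nil
    rc.init ();               // ref_count = 1
    rc.inc ();                // ref_count = 2
    rc.dec ();                // ref_count = 1
    if (rc.dec () == 1)       // old value was 1: last reference released
      rc.fini ();             // poison the count
    assert (!rc.is_valid ());
    return 0;
  }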

src/hb-ot-shaper-indic.cc
@@ -276,7 +276,7 @@ struct indic_shape_plan_t
 {
   bool load_virama_glyph (hb_font_t *font, hb_codepoint_t *pglyph) const
   {
-    hb_codepoint_t glyph = virama_glyph.get_relaxed ();
+    hb_codepoint_t glyph = virama_glyph;
     if (unlikely (glyph == (hb_codepoint_t) -1))
     {
       if (!config->virama || !font->get_nominal_glyph (config->virama, &glyph))
@@ -286,7 +286,7 @@ struct indic_shape_plan_t
 
       /* Our get_nominal_glyph() function needs a font, so we can't get the virama glyph
       * during shape planning... Instead, overwrite it here. */
-      virama_glyph.set_relaxed ((int) glyph);
+      virama_glyph = (int) glyph;
     }
 
     *pglyph = glyph;
@@ -330,7 +330,7 @@ data_create_indic (const hb_ot_shape_plan_t *plan)
 #ifndef HB_NO_UNISCRIBE_BUG_COMPATIBLE
   indic_plan->uniscribe_bug_compatible = hb_options ().uniscribe_bug_compatible;
 #endif
-  indic_plan->virama_glyph.set_relaxed (-1);
+  indic_plan->virama_glyph = -1;
 
   /* Use zero-context would_substitute() matching for new-spec of the main
    * Indic scripts, and scripts with one spec only, but not for old-specs.
@@ -992,7 +992,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan,
      * class of I_Cat(H) is desired but has been lost. */
     /* We don't call load_virama_glyph(), since we know it's already
      * loaded. */
-    hb_codepoint_t virama_glyph = indic_plan->virama_glyph.get_relaxed ();
+    hb_codepoint_t virama_glyph = indic_plan->virama_glyph;
     if (virama_glyph)
     {
       for (unsigned int i = start; i < end; i++)

src/hb-ot-tag.cc
@@ -307,12 +307,12 @@ hb_ot_tags_from_language (const char *lang_str,
     hb_tag_t lang_tag = hb_tag_from_string (lang_str, first_len);
 
     static hb_atomic_int_t last_tag_idx; /* Poor man's cache. */
-    unsigned tag_idx = last_tag_idx.get_relaxed ();
+    unsigned tag_idx = last_tag_idx;
 
     if (likely (tag_idx < ot_languages_len && ot_languages[tag_idx].language == lang_tag) ||
         hb_sorted_array (ot_languages, ot_languages_len).bfind (lang_tag, &tag_idx))
     {
-      last_tag_idx.set_relaxed (tag_idx);
+      last_tag_idx = tag_idx;
       unsigned int i;
       while (tag_idx != 0 &&
             ot_languages[tag_idx].language == ot_languages[tag_idx - 1].language)

src/hb-static.cc
@@ -94,7 +94,7 @@ hb_face_t::load_num_glyphs () const
   ret = hb_max (ret, load_num_glyphs_from_maxp (this));
 
-  num_glyphs.set_relaxed (ret);
+  num_glyphs = ret;
   return ret;
 }
@@ -102,7 +102,7 @@ unsigned int
 hb_face_t::load_upem () const
 {
   unsigned int ret = table.head->get_upem ();
-  upem.set_relaxed (ret);
+  upem = ret;
   return ret;
 }
