[atomic] Add get_relaxed / set_relaxed

To help TSan and be more "correct".
pull/1120/head
Behdad Esfahbod 6 years ago
parent 3dd1b88765
commit 4bc16aca47
Changed files:

  src/hb-atomic-private.hh         (37 lines changed)
  src/hb-common.cc                  (4 lines changed)
  src/hb-debug.hh                  (42 lines changed)
  src/hb-object-private.hh         (12 lines changed)
  src/hb-ot-shape-complex-indic.cc (22 lines changed)
  src/hb-private.hh                (28 lines changed)
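For context on the commit message: "help TSan" refers to ThreadSanitizer, which flags any unsynchronized concurrent access to a plain integer as a data race, even when every interleaving happens to be benign; replacing those accesses with explicitly relaxed atomics keeps them race-free without adding memory-ordering cost. A minimal standalone illustration of that idea (the value and thread setup are made up, not HarfBuzz code), built with e.g. clang++ -fsanitize=thread:

    #include <atomic>
    #include <thread>

    /* A plain "int" written by one thread and read by another is a data race:
     * undefined behavior, and exactly what TSan reports.  The relaxed atomic
     * below has the same (absent) ordering guarantees but is race-free, so
     * TSan stays quiet. */
    static std::atomic<int> cached {-1};      /* -1 means "not computed yet" */

    int main ()
    {
      std::thread writer ([] { cached.store (42, std::memory_order_relaxed); });
      std::thread reader ([] { (void) cached.load (std::memory_order_relaxed); });
      writer.join ();
      reader.join ();
      return 0;
    }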

src/hb-atomic-private.hh
@@ -51,7 +51,9 @@
 /* C++11-style GCC primitives. */
 typedef int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add (&(AI), (V), __ATOMIC_ACQ_REL)
+#define hb_atomic_int_impl_add(AI, V) __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)
+#define hb_atomic_int_impl_set_relaxed(AI, V) __atomic_store_n ((AI), (V), __ATOMIC_RELAXED)
+#define hb_atomic_int_impl_get_relaxed(AI) __atomic_load_n ((AI), __ATOMIC_RELAXED)
 #define hb_atomic_ptr_impl_get(P) __atomic_load_n ((P), __ATOMIC_CONSUME)
 static inline bool
@@ -69,7 +71,9 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
 #include <atomic>
 typedef int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (&AI)->fetch_add ((V), std::memory_order_acq_rel))
+#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->fetch_add ((V), std::memory_order_acq_rel))
+#define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<int> *> (AI)->store ((V), std::memory_order_relaxed))
+#define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<int> *> (AI)->load (std::memory_order_relaxed))
 #define hb_atomic_ptr_impl_get(P) (reinterpret_cast<std::atomic<void*> *> (P)->load (std::memory_order_consume))
 static inline bool
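A side change that repeats in every backend below: hb_atomic_int_impl_add used to take the integer by name and apply the & itself; it now takes a pointer, matching the signature of the underlying primitives, and the & moves to the call sites in hb_atomic_int_t (see the struct hunk further down). A compressed before/after sketch of that convention (illustrative macro and struct names, GCC builtins assumed):

    /* Before: the macro applies '&' to its argument. */
    #define add_before(AI, V) __atomic_fetch_add (&(AI), (V), __ATOMIC_ACQ_REL)
    /* After: the macro expects a pointer, like __atomic_fetch_add itself. */
    #define add_after(AI, V)  __atomic_fetch_add ((AI), (V), __ATOMIC_ACQ_REL)

    struct counter_t
    {
      int v = 0;
      int inc_before () { return add_before (v, 1); }   /* passes the lvalue  */
      int inc_after ()  { return add_after (&v, 1); }   /* passes the address */
    };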
@@ -98,7 +102,7 @@ static inline void _HBMemoryBarrier (void) {
 }
 typedef LONG hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) InterlockedExchangeAdd (&(AI), (V))
+#define hb_atomic_int_impl_add(AI, V) InterlockedExchangeAdd ((AI), (V))
 #define hb_atomic_ptr_impl_get(P) (_HBMemoryBarrier (), (void *) *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) (InterlockedCompareExchangePointer ((void **) (P), (void *) (N), (void *) (O)) == (void *) (O))
@@ -107,7 +111,7 @@ typedef LONG hb_atomic_int_impl_t;
 #elif !defined(HB_NO_MT) && defined(HAVE_INTEL_ATOMIC_PRIMITIVES)
 typedef int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) __sync_fetch_and_add (&(AI), (V))
+#define hb_atomic_int_impl_add(AI, V) __sync_fetch_and_add ((AI), (V))
 #define hb_atomic_ptr_impl_get(P) (void *) (__sync_synchronize (), *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) __sync_bool_compare_and_swap ((P), (O), (N))
@@ -119,7 +123,7 @@ typedef int hb_atomic_int_impl_t;
 #include <mbarrier.h>
 typedef unsigned int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) ( ({__machine_rw_barrier ();}), atomic_add_int_nv (&(AI), (V)) - (V))
+#define hb_atomic_int_impl_add(AI, V) ( ({__machine_rw_barrier ();}), atomic_add_int_nv ((AI), (V)) - (V))
 #define hb_atomic_ptr_impl_get(P) ( ({__machine_rw_barrier ();}), (void *) *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) ( ({__machine_rw_barrier ();}), atomic_cas_ptr ((void **) (P), (void *) (O), (void *) (N)) == (void *) (O) ? true : false)
@@ -136,7 +140,7 @@ typedef unsigned int hb_atomic_int_impl_t;
 typedef int32_t hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) (OSAtomicAdd32Barrier ((V), &(AI)) - (V))
+#define hb_atomic_int_impl_add(AI, V) (OSAtomicAdd32Barrier ((V), (AI)) - (V))
 #define hb_atomic_ptr_impl_get(P) (OSMemoryBarrier (), (void *) *(P))
 #if (MAC_OS_X_VERSION_MIN_REQUIRED > MAC_OS_X_VERSION_10_4 || __IPHONE_VERSION_MIN_REQUIRED >= 20100)
@@ -169,7 +173,7 @@ static inline int _hb_compare_and_swaplp(volatile long* P, long O, long N) {
 }
 typedef int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) _hb_fetch_and_add (&(AI), (V))
+#define hb_atomic_int_impl_add(AI, V) _hb_fetch_and_add ((AI), (V))
 #define hb_atomic_ptr_impl_get(P) (__sync(), (void *) *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) _hb_compare_and_swaplp ((long*)(P), (long)(O), (long)(N))
@@ -179,7 +183,7 @@ typedef int hb_atomic_int_impl_t;
 #define HB_ATOMIC_INT_NIL 1 /* Warn that fallback implementation is in use. */
 typedef volatile int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) (((AI) += (V)) - (V))
+#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
 #define hb_atomic_ptr_impl_get(P) ((void *) *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void * volatile *) (P) == (void *) (O) ? (* (void * volatile *) (P) = (void *) (N), true) : false)
@@ -188,7 +192,7 @@ typedef volatile int hb_atomic_int_impl_t;
 #else /* HB_NO_MT */
 typedef int hb_atomic_int_impl_t;
-#define hb_atomic_int_impl_add(AI, V) (((AI) += (V)) - (V))
+#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
 #define hb_atomic_ptr_impl_get(P) ((void *) *(P))
 #define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void **) (P) == (void *) (O) ? (* (void **) (P) = (void *) (N), true) : false)
@@ -200,15 +204,22 @@ typedef int hb_atomic_int_impl_t;
 #ifndef HB_ATOMIC_INT_INIT
 #define HB_ATOMIC_INT_INIT(V) {V}
 #endif
+#ifndef hb_atomic_int_impl_set_relaxed
+#define hb_atomic_int_impl_set_relaxed(AI, V) ((AI) = (V))
+#endif
+#ifndef hb_atomic_int_impl_get_relaxed
+#define hb_atomic_int_impl_get_relaxed(AI) (AI)
+#endif
 struct hb_atomic_int_t
 {
   mutable hb_atomic_int_impl_t v;
-  inline void set_unsafe (int v_) { v = v_; }
-  inline int get_unsafe (void) const { return v; }
-  inline int inc (void) { return hb_atomic_int_impl_add (v, 1); }
-  inline int dec (void) { return hb_atomic_int_impl_add (v, -1); }
+  inline void set_relaxed (int v_) { hb_atomic_int_impl_set_relaxed (&v, v_); }
+  inline int get_relaxed (void) const { return hb_atomic_int_impl_get_relaxed (&v); }
+  inline int inc (void) { return hb_atomic_int_impl_add (&v, 1); }
+  inline int dec (void) { return hb_atomic_int_impl_add (&v, -1); }
 };
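The wrapper above now exposes only relaxed accessors plus the existing acquire-release inc/dec. A rough equivalent written directly against std::atomic, as a sketch under the assumption that hb_atomic_int_impl_t maps onto std::atomic<int> (not the shipped code):

    #include <atomic>

    struct atomic_int_sketch_t
    {
      std::atomic<int> v {0};

      /* Relaxed accessors: atomicity only, no ordering.  Enough for values
       * that are written idempotently and then only compared or copied. */
      void set_relaxed (int v_)  { v.store (v_, std::memory_order_relaxed); }
      int  get_relaxed () const  { return v.load (std::memory_order_relaxed); }

      /* inc/dec keep acquire-release ordering, which reference counting needs:
       * the thread that drops the last reference must observe all writes made
       * through the object before it tears the object down. */
      int inc () { return v.fetch_add ( 1, std::memory_order_acq_rel); }
      int dec () { return v.fetch_add (-1, std::memory_order_acq_rel); }
    };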

src/hb-common.cc
@@ -37,7 +37,7 @@
 /* hb_options_t */
-hb_options_union_t _hb_options;
+hb_atomic_int_t _hb_options;
 void
 _hb_options_init (void)
@@ -50,7 +50,7 @@ _hb_options_init (void)
   u.opts.uniscribe_bug_compatible = c && strstr (c, "uniscribe-bug-compatible");
   /* This is idempotent and threadsafe. */
-  _hb_options = u;
+  _hb_options.set_relaxed (u.i);
 }
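The _hb_options change applies the same relaxed accessors to the global options word: the option bits are packed into an int through a union and published with a relaxed store, and because initialization is idempotent it does not matter if several threads race to perform it. A simplified sketch of that pattern (the environment-variable name and the debug flag are illustrative, not HarfBuzz's):

    #include <atomic>
    #include <cstdlib>
    #include <cstring>

    struct options_t
    {
      unsigned int initialized : 1;
      unsigned int debug : 1;              /* illustrative flag */
    };
    union options_union_t { int i; options_t opts; };

    static std::atomic<int> g_options {0}; /* 0 == not initialized yet */

    static options_t get_options ()
    {
      /* Copy into a local union so the bitfield can be read thread-safely. */
      options_union_t u;
      u.i = g_options.load (std::memory_order_relaxed);
      if (u.i == 0)                        /* first caller(s) initialize */
      {
        const char *env = std::getenv ("MYLIB_OPTIONS");
        u.i = 0;
        u.opts.initialized = true;
        u.opts.debug = env && std::strstr (env, "debug");
        /* Idempotent: every racing thread computes the same value,
         * so a relaxed store is enough. */
        g_options.store (u.i, std::memory_order_relaxed);
      }
      return u.opts;
    }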

src/hb-debug.hh
@@ -28,6 +28,7 @@
 #define HB_DEBUG_HH
 #include "hb-private.hh"
+#include "hb-atomic-private.hh"
 #include "hb-dsalgs.hh"
@@ -35,6 +36,47 @@
 #define HB_DEBUG 0
 #endif
+/*
+ * Global runtime options.
+ */
+struct hb_options_t
+{
+  unsigned int unused : 1; /* In-case sign bit is here. */
+  unsigned int initialized : 1;
+  unsigned int uniscribe_bug_compatible : 1;
+};
+union hb_options_union_t {
+  int i;
+  hb_options_t opts;
+};
+static_assert ((sizeof (hb_atomic_int_t) >= sizeof (hb_options_union_t)), "");
+HB_INTERNAL void
+_hb_options_init (void);
+extern HB_INTERNAL hb_atomic_int_t _hb_options;
+static inline hb_options_t
+hb_options (void)
+{
+  /* Make a local copy, so we can access bitfield threadsafely. */
+  hb_options_union_t u;
+  u.i = _hb_options.get_relaxed ();
+  if (unlikely (!u.i))
+    _hb_options_init ();
+  return u.opts;
+}
+/*
+ * Debug output (needs enabling at compile time.)
+ */
 static inline bool
 _hb_debug (unsigned int level,
            unsigned int max_level)

src/hb-object-private.hh
@@ -47,14 +47,14 @@ struct hb_reference_count_t
 {
   hb_atomic_int_t ref_count;
-  inline void init (int v) { ref_count.set_unsafe (v); }
-  inline int get_unsafe (void) const { return ref_count.get_unsafe (); }
+  inline void init (int v) { ref_count.set_relaxed (v); }
+  inline int get_relaxed (void) const { return ref_count.get_relaxed (); }
   inline int inc (void) { return ref_count.inc (); }
   inline int dec (void) { return ref_count.dec (); }
-  inline void fini (void) { ref_count.set_unsafe (HB_REFERENCE_COUNT_POISON_VALUE); }
+  inline void fini (void) { ref_count.set_relaxed (HB_REFERENCE_COUNT_POISON_VALUE); }
-  inline bool is_inert (void) const { return ref_count.get_unsafe () == HB_REFERENCE_COUNT_INERT_VALUE; }
-  inline bool is_valid (void) const { return ref_count.get_unsafe () > 0; }
+  inline bool is_inert (void) const { return ref_count.get_relaxed () == HB_REFERENCE_COUNT_INERT_VALUE; }
+  inline bool is_valid (void) const { return ref_count.get_relaxed () > 0; }
 };
@@ -111,7 +111,7 @@ static inline void hb_object_trace (const Type *obj, const char *function)
   DEBUG_MSG (OBJECT, (void *) obj,
              "%s refcount=%d",
              function,
-             obj ? obj->header.ref_count.get_unsafe () : 0);
+             obj ? obj->header.ref_count.get_relaxed () : 0);
 }
 template <typename Type>

src/hb-ot-shape-complex-indic.cc
@@ -251,10 +251,10 @@ struct indic_shape_plan_t
 {
   ASSERT_POD ();
-  inline bool get_virama_glyph (hb_font_t *font, hb_codepoint_t *pglyph) const
+  inline bool load_virama_glyph (hb_font_t *font, hb_codepoint_t *pglyph) const
   {
-    hb_codepoint_t glyph = virama_glyph;
-    if (unlikely (virama_glyph == (hb_codepoint_t) -1))
+    hb_codepoint_t glyph = virama_glyph.get_relaxed ();
+    if (unlikely (glyph == (hb_codepoint_t) -1))
     {
       if (!config->virama || !font->get_nominal_glyph (config->virama, &glyph))
         glyph = 0;
@@ -262,8 +262,8 @@ struct indic_shape_plan_t
        * Maybe one day... */
       /* Our get_nominal_glyph() function needs a font, so we can't get the virama glyph
-       * during shape planning... Instead, overwrite it here. It's safe. Don't worry! */
-      virama_glyph = glyph;
+       * during shape planning... Instead, overwrite it here. */
+      virama_glyph.set_relaxed ((int) glyph);
     }
     *pglyph = glyph;
@@ -273,7 +273,7 @@ struct indic_shape_plan_t
   const indic_config_t *config;
   bool is_old_spec;
-  mutable hb_codepoint_t virama_glyph;
+  mutable hb_atomic_int_t virama_glyph;
   would_substitute_feature_t rphf;
   would_substitute_feature_t pref;
@@ -298,7 +298,7 @@ data_create_indic (const hb_ot_shape_plan_t *plan)
 }
   indic_plan->is_old_spec = indic_plan->config->has_old_spec && ((plan->map.chosen_script[0] & 0x000000FFu) != '2');
-  indic_plan->virama_glyph = (hb_codepoint_t) -1;
+  indic_plan->virama_glyph.set_relaxed (-1);
   /* Use zero-context would_substitute() matching for new-spec of the main
    * Indic scripts, and scripts with one spec only, but not for old-specs.
@@ -419,7 +419,7 @@ update_consonant_positions (const hb_ot_shape_plan_t *plan,
     return;
   hb_codepoint_t virama;
-  if (indic_plan->get_virama_glyph (font, &virama))
+  if (indic_plan->load_virama_glyph (font, &virama))
   {
     hb_face_t *face = font->face;
     unsigned int count = buffer->len;
@@ -1040,9 +1040,11 @@ final_reordering_syllable (const hb_ot_shape_plan_t *plan,
    * phase, and that might have messed up our properties. Recover
    * from a particular case of that where we're fairly sure that a
    * class of OT_H is desired but has been lost. */
-  if (indic_plan->virama_glyph)
+  /* We don't call load_virama_glyph(), since we know it's already
+   * loaded. */
+  hb_codepoint_t virama_glyph = indic_plan->virama_glyph.get_relaxed ();
+  if (virama_glyph)
   {
-    unsigned int virama_glyph = indic_plan->virama_glyph;
     for (unsigned int i = start; i < end; i++)
       if (info[i].codepoint == virama_glyph &&
           _hb_glyph_info_ligated (&info[i]) &&
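The virama_glyph rework above is the lazily-cached-value pattern with a benign race: any thread that finds the cache empty computes the same glyph and stores it, so relaxed atomics are sufficient and the previous plain mutable field (a TSan report waiting to happen) goes away. A schematic version, with a made-up lookup function standing in for font->get_nominal_glyph () and a made-up glyph id (not the Indic shaper's real code):

    #include <atomic>
    #include <cstdint>

    using codepoint_t = uint32_t;

    /* Hypothetical stand-in for font->get_nominal_glyph (). */
    static bool lookup_glyph (codepoint_t unicode, codepoint_t *glyph)
    {
      *glyph = (unicode == 0x094Du) ? 42u : 0u;   /* 42 is a made-up glyph id */
      return *glyph != 0;
    }

    struct plan_t
    {
      codepoint_t virama_unicode;
      mutable std::atomic<int> virama_glyph {-1}; /* -1 == not loaded yet */

      bool load_virama_glyph (codepoint_t *pglyph) const
      {
        codepoint_t glyph = (codepoint_t) virama_glyph.load (std::memory_order_relaxed);
        if (glyph == (codepoint_t) -1)
        {
          if (!lookup_glyph (virama_unicode, &glyph))
            glyph = 0;                            /* 0 == no virama in this font */
          /* Racing threads store the same value, so relaxed is fine. */
          virama_glyph.store ((int) glyph, std::memory_order_relaxed);
        }
        *pglyph = glyph;
        return glyph != 0;
      }
    };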

src/hb-private.hh
@@ -525,34 +525,6 @@ hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3)
 #define FLAG_RANGE(x,y) (ASSERT_STATIC_EXPR_ZERO ((x) < (y)) + FLAG(y+1) - FLAG(x))
-/* Global runtime options. */
-struct hb_options_t
-{
-  unsigned int initialized : 1;
-  unsigned int uniscribe_bug_compatible : 1;
-};
-union hb_options_union_t {
-  unsigned int i;
-  hb_options_t opts;
-};
-static_assert ((sizeof (int) == sizeof (hb_options_union_t)), "");
-HB_INTERNAL void
-_hb_options_init (void);
-extern HB_INTERNAL hb_options_union_t _hb_options;
-static inline hb_options_t
-hb_options (void)
-{
-  if (unlikely (!_hb_options.i))
-    _hb_options_init ();
-  return _hb_options.opts;
-}
 /* Size signifying variable-sized array */
 #define VAR 1
