@@ -259,17 +259,17 @@ VALUE secondary_map_mutex = Qnil;
 // Lambda that will GC entries from the secondary map that are no longer present
 // in the primary map.
-VALUE gc_secondary_map = Qnil;
+VALUE gc_secondary_map_lambda = Qnil;
 ID length;
 extern VALUE weak_obj_cache;
 static void SecondaryMap_Init() {
   rb_gc_register_address(&secondary_map);
-  rb_gc_register_address(&gc_secondary_map);
+  rb_gc_register_address(&gc_secondary_map_lambda);
   rb_gc_register_address(&secondary_map_mutex);
   secondary_map = rb_hash_new();
-  gc_secondary_map = rb_eval_string(
+  gc_secondary_map_lambda = rb_eval_string(
       "->(secondary, weak) {\n"
       "  secondary.delete_if { |k, v| !weak.key?(v) }\n"
       "}\n");
@@ -287,43 +287,61 @@ static void SecondaryMap_Init() {
 // secondary map that are no longer present in the WeakMap. The logic of
 // how often to perform this GC is an arbitrary tuning parameter that
 // represents a straightforward CPU/memory tradeoff.
+//
+// Requires: secondary_map_mutex is held.
 static void SecondaryMap_MaybeGC() {
+  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
   size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
   size_t secondary_len = RHASH_SIZE(secondary_map);
+  if (secondary_len < weak_len) {
+    // Logically this case should not be possible: a valid entry cannot exist
+    // in the weak table unless there is a corresponding entry in the
+    // secondary table. It should *always* be the case that
+    // secondary_len >= weak_len.
+    //
+    // However, ObjectSpace::WeakMap#length (and therefore weak_len) is
+    // unreliable: it overreports its true length by including non-live
+    // objects. Those non-live objects are not yielded in iteration, though,
+    // so we may already have deleted them from the secondary map in an
+    // earlier invocation of SecondaryMap_MaybeGC().
+    //
+    // In this case we can't measure any waste, so we just return.
+    return;
+  }
   size_t waste = secondary_len - weak_len;
-  PBRUBY_ASSERT(secondary_len >= weak_len);
   // GC if we could remove at least 2000 entries or 20% of the table size
   // (whichever is greater). Since the cost of the GC pass is O(N), we
   // want to make sure that we condition this on overall table size, to
   // avoid O(N^2) CPU costs.
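 // For example, with secondary_len == 50,000 the threshold is 10,000 entries,
 // while a table of 5,000 entries keeps the 2,000-entry floor.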
   size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
   if (waste > threshold) {
-    rb_funcall(gc_secondary_map, rb_intern("call"), 2, secondary_map, weak_obj_cache);
+    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2,
+               secondary_map, weak_obj_cache);
   }
 }
-static VALUE SecondaryMap_Get(VALUE key) {
+// Requires: secondary_map_mutex is held by this thread iff create == true.
+static VALUE SecondaryMap_Get(VALUE key, bool create) {
+  PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
   VALUE ret = rb_hash_lookup(secondary_map, key);
-  if (ret == Qnil) {
-    rb_mutex_lock(secondary_map_mutex);
+  if (ret == Qnil && create) {
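+    // About to insert a new entry, so take the opportunity to GC stale
+    // secondary-map entries (those whose value objects are no longer keys
+    // in the WeakMap) if enough waste has accumulated.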
     SecondaryMap_MaybeGC();
     ret = rb_eval_string("Object.new");
     rb_hash_aset(secondary_map, key, ret);
-    rb_mutex_unlock(secondary_map_mutex);
   }
   return ret;
 }
 #endif
-static VALUE ObjectCache_GetKey(const void* key) {
+// Requires: secondary_map_mutex is held by this thread iff create == true.
+static VALUE ObjectCache_GetKey(const void* key, bool create) {
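+  // The raw pointer is turned into an integer key. Pointers here are at
+  // least 4-byte aligned (asserted below), so the low two bits are always
+  // zero and shifting them away loses no information while keeping the
+  // numeric key smaller.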
   char buf[sizeof(key)];
   memcpy(&buf, &key, sizeof(key));
   intptr_t key_int = (intptr_t)key;
   PBRUBY_ASSERT((key_int & 3) == 0);
   VALUE ret = LL2NUM(key_int >> 2);
 #if USE_SECONDARY_MAP
-  ret = SecondaryMap_Get(ret);
+  ret = SecondaryMap_Get(ret, create);
 #endif
   return ret;
 }
@@ -347,14 +365,20 @@ static void ObjectCache_Init() {
 void ObjectCache_Add(const void* key, VALUE val) {
   PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
-  VALUE key_rb = ObjectCache_GetKey(key);
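+  // Hold the mutex across both the secondary-map insertion and the WeakMap
+  // store: a concurrent SecondaryMap_MaybeGC() could otherwise observe the
+  // freshly created key object before it becomes a WeakMap key and drop its
+  // secondary-map entry.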
+#if USE_SECONDARY_MAP
+  rb_mutex_lock(secondary_map_mutex);
+#endif
+  VALUE key_rb = ObjectCache_GetKey(key, true);
   rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
+#if USE_SECONDARY_MAP
+  rb_mutex_unlock(secondary_map_mutex);
+#endif
   PBRUBY_ASSERT(ObjectCache_Get(key) == val);
 }
 // Returns the cached object for this key, if any. Otherwise returns Qnil.
 VALUE ObjectCache_Get(const void* key) {
-  VALUE key_rb = ObjectCache_GetKey(key);
+  VALUE key_rb = ObjectCache_GetKey(key, false);
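+  // Reads pass create == false, so they never lock or mutate the secondary
+  // map; on a cache miss the WeakMap lookup simply returns nil.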
   return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
 }