@@ -80,17 +80,22 @@ typedef struct grpc_mdelem_data {
    this bit set in their integer value */
 #define GRPC_MDELEM_STORAGE_INTERNED_BIT 1
 
+/* External and static storage metadata has no refcount to ref/unref. Allocated
+ * and interned metadata do have a refcount. Metadata ref and unref methods use
+ * a switch statement on this enum to determine which behaviour to execute.
+ * Keeping the no-ref cases together and the ref-cases together leads to
+ * slightly better code generation (9 inlined instructions rather than 10). */
 typedef enum {
   /* memory pointed to by grpc_mdelem::payload is owned by an external system */
   GRPC_MDELEM_STORAGE_EXTERNAL = 0,
-  /* memory pointed to by grpc_mdelem::payload is interned by the metadata
-     system */
-  GRPC_MDELEM_STORAGE_INTERNED = GRPC_MDELEM_STORAGE_INTERNED_BIT,
+  /* memory is in the static metadata table */
+  GRPC_MDELEM_STORAGE_STATIC = GRPC_MDELEM_STORAGE_INTERNED_BIT,
   /* memory pointed to by grpc_mdelem::payload is allocated by the metadata
      system */
   GRPC_MDELEM_STORAGE_ALLOCATED = 2,
-  /* memory is in the static metadata table */
-  GRPC_MDELEM_STORAGE_STATIC = 2 | GRPC_MDELEM_STORAGE_INTERNED_BIT,
+  /* memory pointed to by grpc_mdelem::payload is interned by the metadata
+     system */
+  GRPC_MDELEM_STORAGE_INTERNED = 2 | GRPC_MDELEM_STORAGE_INTERNED_BIT,
 } grpc_mdelem_data_storage;
 
 struct grpc_mdelem {
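Reviewer note: the reordering above leaves the two storage kinds that need no unref at values 0 and 1 and the two refcounted kinds at 2 and 3, which is what lets the unref switch in the last hunk group its cases. A self-contained sketch of the resulting bit layout, using hypothetical names rather than the real GRPC_MDELEM_* macros:

#include <cassert>
#include <cstdint>

// Mirrors the patched enum: bit 0 marks "interned", values >= 2 are refcounted.
enum StorageSketch : uint8_t {
  kExternal = 0,      // owned by an external system; never unref'd
  kStatic = 1,        // static metadata table; never unref'd
  kAllocated = 2,     // refcounted, not interned
  kInterned = 2 | 1,  // refcounted and interned
};

// Hypothetical helpers: grouping the refcounted kinds at the top of the value
// range turns "does this need an unref?" into a single comparison.
inline bool IsInterned(StorageSketch s) { return s & 1; }
inline bool NeedsUnref(StorageSketch s) { return s >= kAllocated; }

int main() {
  assert(IsInterned(kStatic) && IsInterned(kInterned));
  assert(!NeedsUnref(kExternal) && !NeedsUnref(kStatic));
  assert(NeedsUnref(kAllocated) && NeedsUnref(kInterned));
  return 0;
}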
@@ -196,17 +201,17 @@ class StaticMetadata {
   uint32_t hash_;
 };
 
-class InternedMetadata {
+class RefcountedMdBase {
  public:
-  struct BucketLink {
-    explicit BucketLink(InternedMetadata* md) : next(md) {}
-
-    InternedMetadata* next = nullptr;
-  };
-
-  InternedMetadata(const grpc_slice& key, const grpc_slice& value,
-                   uint32_t hash, InternedMetadata* next);
-  ~InternedMetadata();
+  RefcountedMdBase(const grpc_slice& key, const grpc_slice& value)
+      : key_(key), value_(value), refcnt_(1) {}
+  RefcountedMdBase(const grpc_slice& key, const grpc_slice& value,
+                   uint32_t hash)
+      : key_(key), value_(value), refcnt_(1), hash_(hash) {}
+
+  const grpc_slice& key() const { return key_; }
+  const grpc_slice& value() const { return value_; }
+  uint32_t hash() { return hash_; }
 
 #ifndef NDEBUG
   void Ref(const char* file, int line) {
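Reviewer note: the two constructors above (with and without a hash) pair with the `uint32_t hash_ = 0;` default member initializer introduced in the next hunk, so allocated elements get a zero hash without needing a third constructor. A minimal sketch of the base-class extraction this hunk starts, with std::atomic standing in for grpc_core::Atomic and hypothetical class names:

#include <atomic>
#include <cstdint>

// Shape of the refactor: one base owns the refcount and hash, and the two
// concrete kinds below only add what is unique to them.
class RefcountedBaseSketch {
 public:
  RefcountedBaseSketch() : refcnt_(1) {}
  explicit RefcountedBaseSketch(uint32_t hash) : refcnt_(1), hash_(hash) {}
  uint32_t hash() const { return hash_; }

 protected:
  std::atomic<intptr_t> refcnt_;

 private:
  uint32_t hash_ = 0;  // stays 0 for kinds constructed without a hash
};

// Interned entries are hash-table members, so they carry bucket linkage.
class InternedSketch : public RefcountedBaseSketch {
 public:
  InternedSketch(uint32_t hash, InternedSketch* next)
      : RefcountedBaseSketch(hash), next_(next) {}

 private:
  InternedSketch* next_;
};

// Allocated entries need no hash or linkage; the base defaults suffice.
class AllocatedSketch : public RefcountedBaseSketch {};

int main() {
  InternedSketch interned(0xabcd, nullptr);
  AllocatedSketch allocated;
  return (interned.hash() == 0xabcd && allocated.hash() == 0) ? 0 : 1;
}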
@@ -218,92 +223,65 @@ class InternedMetadata {
     grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
     return Unref();
   }
-#else
-  // We define a naked Ref() in the else-clause to make sure we don't
-  // inadvertently skip the assert on debug builds.
+#endif
   void Ref() {
     /* we can assume the ref count is >= 1 as the application is calling
        this function - meaning that no adjustment to mdtab_free is necessary,
        simplifying the logic here to be just an atomic increment */
     refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
   }
-#endif  // ifndef NDEBUG
-
   bool Unref() {
     const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
     GPR_DEBUG_ASSERT(prior > 0);
     return prior == 1;
   }
 
-  void RefWithShardLocked(mdtab_shard* shard);
-  const grpc_slice& key() const { return key_; }
-  const grpc_slice& value() const { return value_; }
-  UserData* user_data() { return &user_data_; }
-  uint32_t hash() { return hash_; }
-  InternedMetadata* bucket_next() { return link_.next; }
-  void set_bucket_next(InternedMetadata* md) { link_.next = md; }
-
-  static size_t CleanupLinkedMetadata(BucketLink* head);
-
- private:
+ protected:
+  intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
   bool AllRefsDropped() { return refcnt_.Load(MemoryOrder::ACQUIRE) == 0; }
   bool FirstRef() { return refcnt_.FetchAdd(1, MemoryOrder::RELAXED) == 0; }
-  intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
 
+ private:
   /* must be byte compatible with grpc_mdelem_data */
   grpc_slice key_;
   grpc_slice value_;
-
-  /* private only data */
-  uint32_t hash_;
   grpc_core::Atomic<intptr_t> refcnt_;
+  uint32_t hash_ = 0;
+};
 
-  UserData user_data_;
+class InternedMetadata : public RefcountedMdBase {
+ public:
+  struct BucketLink {
+    explicit BucketLink(InternedMetadata* md) : next(md) {}
+
+    InternedMetadata* next = nullptr;
+  };
+
+  InternedMetadata(const grpc_slice& key, const grpc_slice& value,
+                   uint32_t hash, InternedMetadata* next);
+  ~InternedMetadata();
+
+  void RefWithShardLocked(mdtab_shard* shard);
+  UserData* user_data() { return &user_data_; }
+  InternedMetadata* bucket_next() { return link_.next; }
+  void set_bucket_next(InternedMetadata* md) { link_.next = md; }
+
+  static size_t CleanupLinkedMetadata(BucketLink* head);
+
+ private:
+  UserData user_data_;
   BucketLink link_;
 };
 
 /* Shadow structure for grpc_mdelem_data for allocated elements */
-class AllocatedMetadata {
+class AllocatedMetadata : public RefcountedMdBase {
  public:
   AllocatedMetadata(const grpc_slice& key, const grpc_slice& value);
   ~AllocatedMetadata();
 
-  const grpc_slice& key() const { return key_; }
-  const grpc_slice& value() const { return value_; }
   UserData* user_data() { return &user_data_; }
 
-#ifndef NDEBUG
-  void Ref(const char* file, int line) {
-    grpc_mdelem_trace_ref(this, key_, value_, RefValue(), file, line);
-    Ref();
-  }
-  bool Unref(const char* file, int line) {
-    grpc_mdelem_trace_unref(this, key_, value_, RefValue(), file, line);
-    return Unref();
-  }
-#endif  // ifndef NDEBUG
-
-  void Ref() {
-    /* we can assume the ref count is >= 1 as the application is calling
-       this function - meaning that no adjustment to mdtab_free is necessary,
-       simplifying the logic here to be just an atomic increment */
-    refcnt_.FetchAdd(1, MemoryOrder::RELAXED);
-  }
-  bool Unref() {
-    const intptr_t prior = refcnt_.FetchSub(1, MemoryOrder::ACQ_REL);
-    GPR_DEBUG_ASSERT(prior > 0);
-    return prior == 1;
-  }
-
  private:
-  intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
-
-  /* must be byte compatible with grpc_mdelem_data */
-  grpc_slice key_;
-  grpc_slice value_;
-
-  /* private only data */
-  grpc_core::Atomic<intptr_t> refcnt_;
-
   UserData user_data_;
 };
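Reviewer note: the hunk above moves the Ref/Unref bodies into the shared base unchanged, including their memory orderings: the increment can stay RELAXED because the caller already holds a reference, while the decrement needs ACQ_REL so the thread that drops the count to zero observes every write made while references were held. A runnable sketch of that contract with std::atomic (hypothetical Payload type, not gRPC code):

#include <atomic>
#include <cstdint>
#include <thread>

struct Payload {
  std::atomic<intptr_t> refcnt{1};
  int data = 0;

  // Relaxed is enough: a caller holding a reference keeps the object alive.
  void Ref() { refcnt.fetch_add(1, std::memory_order_relaxed); }

  // Acq_rel makes every write done under a held reference visible to the
  // thread whose decrement reaches zero and frees the object.
  void Unref() {
    if (refcnt.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      delete this;
    }
  }
};

int main() {
  auto* p = new Payload;
  p->Ref();  // second reference, handed to the worker thread
  std::thread worker([p] {
    p->data = 42;  // write made while a reference is held
    p->Unref();
  });
  worker.join();
  p->Unref();  // last unref deletes; the data=42 write is visible here
  return 0;
}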
@@ -348,24 +326,35 @@ inline grpc_mdelem grpc_mdelem_ref(grpc_mdelem gmd) {
 
 #ifndef NDEBUG
 #define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
-void grpc_mdelem_do_unref(grpc_mdelem gmd, const char* file, int line);
+void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
+                                uint32_t hash, const char* file, int line);
 inline void grpc_mdelem_unref(grpc_mdelem gmd, const char* file, int line) {
 #else
 #define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
-void grpc_mdelem_do_unref(grpc_mdelem gmd);
+void grpc_mdelem_on_final_unref(grpc_mdelem_data_storage storage, void* ptr,
+                                uint32_t hash);
 inline void grpc_mdelem_unref(grpc_mdelem gmd) {
 #endif
-  switch (GRPC_MDELEM_STORAGE(gmd)) {
+  const grpc_mdelem_data_storage storage = GRPC_MDELEM_STORAGE(gmd);
+  switch (storage) {
     case GRPC_MDELEM_STORAGE_EXTERNAL:
     case GRPC_MDELEM_STORAGE_STATIC:
       return;
     case GRPC_MDELEM_STORAGE_INTERNED:
     case GRPC_MDELEM_STORAGE_ALLOCATED:
+      auto* md =
+          reinterpret_cast<grpc_core::RefcountedMdBase*> GRPC_MDELEM_DATA(gmd);
+      /* once the refcount hits zero, some other thread can come along and
+         free an interned md at any time: it's unsafe from this point on to
+         access it so we read the hash now. */
+      uint32_t hash = md->hash();
+      if (GPR_UNLIKELY(md->Unref())) {
 #ifndef NDEBUG
-      grpc_mdelem_do_unref(gmd, file, line);
+        grpc_mdelem_on_final_unref(storage, md, hash, file, line);
 #else
-      grpc_mdelem_do_unref(gmd);
+        grpc_mdelem_on_final_unref(storage, md, hash);
 #endif
+      }
       return;
   }
 }
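Reviewer note: in the final hunk the order of operations is load-bearing: both `storage` and `hash` are read before `Unref()`, because once the count can reach zero another thread may free an interned element at any time, exactly as the new comment says. A standalone sketch of that read-then-release pattern (hypothetical names standing in for grpc_mdelem_on_final_unref):

#include <atomic>
#include <cstdint>
#include <cstdio>

struct MdSketch {
  std::atomic<intptr_t> refcnt{1};
  uint32_t hash = 0;
};

// Stand-in for grpc_mdelem_on_final_unref: only the freeing path may still
// touch the element; everyone else copied their data out beforehand.
static void OnFinalUnref(MdSketch* md, uint32_t hash) {
  std::printf("freeing element with hash %u\n", static_cast<unsigned>(hash));
  delete md;
}

static void UnrefSketch(MdSketch* md) {
  // Copy out everything needed later *before* dropping the reference; after
  // the decrement, a concurrent unref may have already freed md.
  const uint32_t hash = md->hash;
  if (md->refcnt.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    OnFinalUnref(md, hash);
  }
}

int main() {
  auto* md = new MdSketch;
  md->hash = 0x1234;
  UnrefSketch(md);  // last reference: frees via OnFinalUnref
  return 0;
}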