Added specializations for grpc_mdelem_create.

In several cases, we create grpc mdelem structures using known-static
metadata inputs. Furthermore, in several cases we create a slice on
the heap (e.g. grpc_slice_from_copied_buffer) where we know we are
transferring refcount ownership. In several cases, then, we can:

1) Avoid unnecessary ref/unref operations that are no-ops (for static
   slices) or superfluous (if we're transferring ownership).
2) Avoid unnecessarily comprehensive calls to grpc_slice_eq (since
   they'd only be called with static or interned slice arguments,
   which by construction would have equal refcounts if they were
   in fact equal).
3) Avoid unnecessary checks to see if a slice is interned (when we
   know that they are).

To avoid polluting the internal API, we introduce the notion of
strongly-typed grpc_slice objects. We draw a distinction between
Internal (interned and static-storage) slices and Extern (inline and
non-statically allocated). We introduce overloads to
grpc_mdelem_create() and grpc_mdelem_from_slices() for the fastpath
cases identified above based on these slice types.

From the programmer's point of view, though, nothing changes - they
need only use grpc_mdelem_create() and grpc_mdelem_from_slices() as
before, and the appropriate fastpath will be picked based on type
inference. If no special knowledge exists for the slice type (i.e. we
pass in generic grpc_slice objects), the slowpath method will still
always produce correct behaviour.

This is good for:
- Roughly 1-3% reduction in CPU time for several unary/streaming
  ping pong fullstack microbenchmarks.
- Reduction of about 15-20% in CPU time for some hpack parser
  microbenchmarks.
- 10-12% reduction of CPU time for metadata microbenchmarks involving
  interned slice comparisons.
pull/19427/head
Arjun Roy 6 years ago
parent a163f48fad
commit 557446a11e
  1. 4
      src/core/ext/filters/client_channel/client_channel.cc
  2. 9
      src/core/ext/filters/http/client/http_client_filter.cc
  3. 6
      src/core/ext/filters/http/client_authority_filter.cc
  4. 2
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  5. 4
      src/core/ext/transport/chttp2/transport/hpack_encoder.cc
  6. 73
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  7. 2
      src/core/ext/transport/inproc/inproc_transport.cc
  8. 8
      src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
  9. 74
      src/core/lib/slice/slice.cc
  10. 169
      src/core/lib/slice/slice_intern.cc
  11. 17
      src/core/lib/slice/slice_internal.h
  12. 120
      src/core/lib/slice/slice_utils.h
  13. 12
      src/core/lib/surface/channel.cc
  14. 4
      src/core/lib/surface/lame_client.cc
  15. 2
      src/core/lib/transport/error_utils.cc
  16. 232
      src/core/lib/transport/metadata.cc
  17. 78
      src/core/lib/transport/metadata.h
  18. 1084
      src/core/lib/transport/static_metadata.cc
  19. 6
      src/core/lib/transport/static_metadata.h
  20. 2
      src/cpp/ext/filters/census/client_filter.cc
  21. 2
      src/cpp/ext/filters/census/server_filter.cc
  22. 76
      test/cpp/microbenchmarks/bm_metadata.cc
  23. 18
      tools/codegen/core/gen_static_metadata.py

@ -329,8 +329,8 @@ class CallData {
grpc_linked_mdelem* linked_mdelem = static_cast<grpc_linked_mdelem*>(
calld_->arena_->Alloc(sizeof(grpc_linked_mdelem)));
linked_mdelem->md = grpc_mdelem_from_slices(
grpc_slice_from_static_buffer_internal(key.data(), key.size()),
grpc_slice_from_static_buffer_internal(value.data(), value.size()));
grpc_core::ExternallyManagedSlice(key.data(), key.size()),
grpc_core::ExternallyManagedSlice(value.data(), value.size()));
GPR_ASSERT(grpc_metadata_batch_link_tail(batch_, linked_mdelem) ==
GRPC_ERROR_NONE);
}

@ -304,7 +304,7 @@ static grpc_error* update_path_for_get(grpc_call_element* elem,
estimated_len += grpc_base64_estimate_encoded_size(
batch->payload->send_message.send_message->length(), true /* url_safe */,
false /* multi_line */);
grpc_slice path_with_query_slice = GRPC_SLICE_MALLOC(estimated_len);
grpc_core::UnmanagedMemorySlice path_with_query_slice(estimated_len);
/* memcopy individual pieces into this slice */
char* write_ptr =
reinterpret_cast<char*> GRPC_SLICE_START_PTR(path_with_query_slice);
@ -514,13 +514,12 @@ static size_t max_payload_size_from_args(const grpc_channel_args* args) {
return kMaxPayloadSizeForGet;
}
static grpc_slice user_agent_from_args(const grpc_channel_args* args,
const char* transport_name) {
static grpc_core::ManagedMemorySlice user_agent_from_args(
const grpc_channel_args* args, const char* transport_name) {
gpr_strvec v;
size_t i;
int is_first = 1;
char* tmp;
grpc_slice result;
gpr_strvec_init(&v);
@ -558,7 +557,7 @@ static grpc_slice user_agent_from_args(const grpc_channel_args* args,
tmp = gpr_strvec_flatten(&v, nullptr);
gpr_strvec_destroy(&v);
result = grpc_slice_intern(grpc_slice_from_static_string_internal(tmp));
grpc_core::ManagedMemorySlice result(tmp);
gpr_free(tmp);
return result;

@ -44,7 +44,7 @@ struct call_data {
};
struct channel_data {
grpc_slice default_authority;
grpc_core::ManagedMemorySlice default_authority;
grpc_mdelem default_authority_mdelem;
};
@ -101,8 +101,8 @@ grpc_error* init_channel_elem(grpc_channel_element* elem,
return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"GRPC_ARG_DEFAULT_AUTHORITY channel arg. must be a string");
}
chand->default_authority = grpc_slice_intern(
grpc_slice_from_static_string_internal(default_authority_str));
chand->default_authority =
grpc_core::ManagedMemorySlice(default_authority_str);
chand->default_authority_mdelem = grpc_mdelem_create(
GRPC_MDSTR_AUTHORITY, chand->default_authority, nullptr);
GPR_ASSERT(!args->is_last);

@ -2152,7 +2152,7 @@ void grpc_chttp2_fake_status(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
&s->metadata_buffer[1],
grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_STATUS,
grpc_slice_from_copied_string(status_string))));
grpc_core::UnmanagedMemorySlice(status_string))));
if (!GRPC_SLICE_IS_EMPTY(slice)) {
GRPC_LOG_IF_ERROR(
"add_status_message",

@ -604,8 +604,8 @@ static void deadline_enc(grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
grpc_mdelem mdelem;
grpc_http2_encode_timeout(deadline - grpc_core::ExecCtx::Get()->Now(),
timeout_str);
mdelem = grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_TIMEOUT,
grpc_slice_from_copied_string(timeout_str));
mdelem = grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_TIMEOUT, grpc_core::UnmanagedMemorySlice(timeout_str));
hpack_enc(c, mdelem, st);
GRPC_MDELEM_UNREF(mdelem);
}

@ -660,24 +660,32 @@ static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md) {
return GRPC_ERROR_NONE;
}
static grpc_slice take_string(grpc_chttp2_hpack_parser* p,
grpc_chttp2_hpack_parser_string* str,
bool intern) {
grpc_slice s;
static grpc_core::UnmanagedMemorySlice take_string_extern(
grpc_chttp2_hpack_parser* p, grpc_chttp2_hpack_parser_string* str) {
grpc_core::UnmanagedMemorySlice s;
if (!str->copied) {
if (intern) {
s = grpc_slice_intern(str->data.referenced);
grpc_slice_unref_internal(str->data.referenced);
} else {
s = str->data.referenced;
}
GPR_DEBUG_ASSERT(!grpc_slice_is_interned(str->data.referenced));
s = static_cast<grpc_core::UnmanagedMemorySlice&>(str->data.referenced);
str->copied = true;
str->data.referenced = grpc_core::UnmanagedMemorySlice();
} else {
s = grpc_core::UnmanagedMemorySlice(str->data.copied.str,
str->data.copied.length);
}
str->data.copied.length = 0;
return s;
}
static grpc_core::ManagedMemorySlice take_string_intern(
grpc_chttp2_hpack_parser* p, grpc_chttp2_hpack_parser_string* str) {
grpc_core::ManagedMemorySlice s;
if (!str->copied) {
s = grpc_core::ManagedMemorySlice(&str->data.referenced);
grpc_slice_unref_internal(str->data.referenced);
str->copied = true;
str->data.referenced = grpc_empty_slice();
} else if (intern) {
s = grpc_slice_intern(grpc_slice_from_static_buffer_internal(
str->data.copied.str, str->data.copied.length));
} else {
s = grpc_slice_from_copied_buffer(str->data.copied.str,
s = grpc_core::ManagedMemorySlice(str->data.copied.str,
str->data.copied.length);
}
str->data.copied.length = 0;
@ -812,6 +820,12 @@ static grpc_mdelem get_precomputed_md_for_idx(grpc_chttp2_hpack_parser* p) {
return md;
}
/* Returns the key slice of a metadata element produced by
   get_precomputed_md_for_idx(), taking a ref on the underlying slice via
   grpc_slice_ref_internal(). Returns BY VALUE: the previous revision
   returned a const reference bound to the temporary produced by the
   ref/cast expression, which dangles as soon as the full expression ends.
   A ManagedMemorySlice is layout-identical to grpc_slice (type information
   only), so returning by value is a cheap struct copy and call sites that
   pass it as a const-ref argument are unaffected. */
static grpc_core::ManagedMemorySlice get_indexed_key(grpc_mdelem md) {
  /* Only interned/static elements have canonical (managed) key slices. */
  GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(md));
  return static_cast<const grpc_core::ManagedMemorySlice&>(
      grpc_slice_ref_internal(GRPC_MDKEY(md)));
}
/* finish a literal header with incremental indexing */
static grpc_error* finish_lithdr_incidx(grpc_chttp2_hpack_parser* p,
const uint8_t* cur,
@ -819,8 +833,8 @@ static grpc_error* finish_lithdr_incidx(grpc_chttp2_hpack_parser* p,
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX();
grpc_mdelem md = get_precomputed_md_for_idx(p);
grpc_error* err = on_hdr<true>(
p, grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(p, &p->value, true)));
p, grpc_mdelem_from_slices(get_indexed_key(md),
take_string_intern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -830,9 +844,9 @@ static grpc_error* finish_lithdr_incidx_v(grpc_chttp2_hpack_parser* p,
const uint8_t* cur,
const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V();
grpc_error* err =
on_hdr<true>(p, grpc_mdelem_from_slices(take_string(p, &p->key, true),
take_string(p, &p->value, true)));
grpc_error* err = on_hdr<true>(
p, grpc_mdelem_from_slices(take_string_intern(p, &p->key),
take_string_intern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -883,8 +897,8 @@ static grpc_error* finish_lithdr_notidx(grpc_chttp2_hpack_parser* p,
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX();
grpc_mdelem md = get_precomputed_md_for_idx(p);
grpc_error* err = on_hdr<false>(
p, grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(p, &p->value, false)));
p, grpc_mdelem_from_slices(get_indexed_key(md),
take_string_extern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -895,8 +909,8 @@ static grpc_error* finish_lithdr_notidx_v(grpc_chttp2_hpack_parser* p,
const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V();
grpc_error* err = on_hdr<false>(
p, grpc_mdelem_from_slices(take_string(p, &p->key, true),
take_string(p, &p->value, false)));
p, grpc_mdelem_from_slices(take_string_intern(p, &p->key),
take_string_extern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -947,8 +961,8 @@ static grpc_error* finish_lithdr_nvridx(grpc_chttp2_hpack_parser* p,
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX();
grpc_mdelem md = get_precomputed_md_for_idx(p);
grpc_error* err = on_hdr<false>(
p, grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
take_string(p, &p->value, false)));
p, grpc_mdelem_from_slices(get_indexed_key(md),
take_string_extern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -959,8 +973,8 @@ static grpc_error* finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p,
const uint8_t* end) {
GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V();
grpc_error* err = on_hdr<false>(
p, grpc_mdelem_from_slices(take_string(p, &p->key, true),
take_string(p, &p->value, false)));
p, grpc_mdelem_from_slices(take_string_intern(p, &p->key),
take_string_extern(p, &p->value)));
if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
return parse_begin(p, cur, end);
}
@ -1510,13 +1524,12 @@ static grpc_error* parse_key_string(grpc_chttp2_hpack_parser* p,
static bool is_binary_literal_header(grpc_chttp2_hpack_parser* p) {
/* We know that either argument here is a reference counter slice.
* 1. If a result of grpc_slice_from_static_buffer_internal, the refcount is
* set to kNoopRefcount.
* 1. If it is a grpc_core::StaticSlice, the refcount is set to kNoopRefcount.
* 2. If it's p->key.data.referenced, then p->key.copied was set to false,
* which occurs in begin_parse_string() - where the refcount is set to
* p->current_slice_refcount, which is not null. */
return grpc_is_refcounted_slice_binary_header(
p->key.copied ? grpc_slice_from_static_buffer_internal(
p->key.copied ? grpc_core::ExternallyManagedSlice(
p->key.data.copied.str, p->key.data.copied.length)
: p->key.data.referenced);
}

@ -1203,7 +1203,7 @@ void inproc_transports_create(grpc_transport** server_transport,
*/
void grpc_inproc_transport_init(void) {
grpc_core::ExecCtx exec_ctx;
g_empty_slice = grpc_slice_from_static_buffer_internal(nullptr, 0);
g_empty_slice = grpc_core::ExternallyManagedSlice();
grpc_slice key_tmp = grpc_slice_from_static_string(":path");
g_fake_path_key = grpc_slice_intern(key_tmp);

@ -199,8 +199,8 @@ grpc_oauth2_token_fetcher_credentials_parse_server_response(
*token_lifetime = strtol(expires_in->value, nullptr, 10) * GPR_MS_PER_SEC;
if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(*token_md);
*token_md = grpc_mdelem_from_slices(
grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
grpc_slice_from_copied_string(new_access_token));
grpc_core::ExternallyManagedSlice(GRPC_AUTHORIZATION_METADATA_KEY),
grpc_core::UnmanagedMemorySlice(new_access_token));
status = GRPC_CREDENTIALS_OK;
}
@ -721,8 +721,8 @@ grpc_access_token_credentials::grpc_access_token_credentials(
gpr_asprintf(&token_md_value, "Bearer %s", access_token);
grpc_core::ExecCtx exec_ctx;
access_token_md_ = grpc_mdelem_from_slices(
grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
grpc_slice_from_copied_string(token_md_value));
grpc_core::ExternallyManagedSlice(GRPC_AUTHORIZATION_METADATA_KEY),
grpc_core::UnmanagedMemorySlice(token_md_value));
gpr_free(token_md_value);
}

@ -37,12 +37,7 @@ char* grpc_slice_to_c_string(grpc_slice slice) {
return out;
}
grpc_slice grpc_empty_slice(void) {
grpc_slice out;
out.refcount = nullptr;
out.data.inlined.length = 0;
return out;
}
grpc_slice grpc_empty_slice(void) { return grpc_core::UnmanagedMemorySlice(); }
grpc_slice grpc_slice_copy(grpc_slice s) {
grpc_slice out = GRPC_SLICE_MALLOC(GRPC_SLICE_LENGTH(s));
@ -112,11 +107,11 @@ size_t grpc_slice_memory_usage(grpc_slice s) {
}
grpc_slice grpc_slice_from_static_buffer(const void* s, size_t len) {
return grpc_slice_from_static_buffer_internal(s, len);
return grpc_core::ExternallyManagedSlice(s, len);
}
grpc_slice grpc_slice_from_static_string(const char* s) {
return grpc_slice_from_static_buffer_internal(s, strlen(s));
return grpc_core::ExternallyManagedSlice(s, strlen(s));
}
grpc_slice grpc_slice_new_with_user_data(void* p, size_t len,
@ -202,15 +197,29 @@ grpc_slice grpc_slice_new_with_len(void* p, size_t len,
return slice;
}
// Build a slice holding a private copy of |source|[0, length). Payloads that
// fit in the inline buffer are stored inline with a null refcount; anything
// larger is heap-allocated via HeapInit().
grpc_core::UnmanagedMemorySlice::UnmanagedMemorySlice(const char* source,
                                                      size_t length) {
  const bool fits_inline = length <= sizeof(data.inlined.bytes);
  if (!fits_inline) {
    HeapInit(length);
  } else {
    refcount = nullptr;
    data.inlined.length = static_cast<uint8_t>(length);
  }
  if (length != 0) {
    memcpy(GRPC_SLICE_START_PTR(*this), source, length);
  }
}

// Convenience overload: copy a NUL-terminated C string (terminator excluded).
grpc_core::UnmanagedMemorySlice::UnmanagedMemorySlice(const char* source)
    : grpc_core::UnmanagedMemorySlice::UnmanagedMemorySlice(source,
                                                            strlen(source)) {}
grpc_slice grpc_slice_from_copied_buffer(const char* source, size_t length) {
if (length == 0) return grpc_empty_slice();
grpc_slice slice = GRPC_SLICE_MALLOC(length);
memcpy(GRPC_SLICE_START_PTR(slice), source, length);
return slice;
return grpc_core::UnmanagedMemorySlice(source, length);
}
grpc_slice grpc_slice_from_copied_string(const char* source) {
return grpc_slice_from_copied_buffer(source, strlen(source));
return grpc_core::UnmanagedMemorySlice(source, strlen(source));
}
grpc_slice grpc_slice_from_moved_buffer(grpc_core::UniquePtr<char> p,
@ -261,8 +270,11 @@ class MallocRefCount {
} // namespace
grpc_slice grpc_slice_malloc_large(size_t length) {
grpc_slice slice;
return grpc_core::UnmanagedMemorySlice(
length, grpc_core::UnmanagedMemorySlice::ForceHeapAllocation());
}
void grpc_core::UnmanagedMemorySlice::HeapInit(size_t length) {
/* Memory layout used by the slice created here:
+-----------+----------------------------------------------------------+
@ -281,29 +293,30 @@ grpc_slice grpc_slice_malloc_large(size_t length) {
/* Build up the slice to be returned. */
/* The slices refcount points back to the allocated block. */
slice.refcount = rc->base_refcount();
refcount = rc->base_refcount();
/* The data bytes are placed immediately after the refcount struct */
slice.data.refcounted.bytes = reinterpret_cast<uint8_t*>(rc + 1);
data.refcounted.bytes = reinterpret_cast<uint8_t*>(rc + 1);
/* And the length of the block is set to the requested length */
slice.data.refcounted.length = length;
return slice;
data.refcounted.length = length;
}
grpc_slice grpc_slice_malloc(size_t length) {
grpc_slice slice;
return grpc_core::UnmanagedMemorySlice(length);
}
if (length > sizeof(slice.data.inlined.bytes)) {
return grpc_slice_malloc_large(length);
grpc_core::UnmanagedMemorySlice::UnmanagedMemorySlice(size_t length) {
if (length > sizeof(data.inlined.bytes)) {
HeapInit(length);
} else {
/* small slice: just inline the data */
slice.refcount = nullptr;
slice.data.inlined.length = static_cast<uint8_t>(length);
refcount = nullptr;
data.inlined.length = static_cast<uint8_t>(length);
}
return slice;
}
grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) {
grpc_slice subset;
template <typename Slice>
static Slice sub_no_ref(const Slice& source, size_t begin, size_t end) {
Slice subset;
GPR_ASSERT(end >= begin);
@ -327,6 +340,15 @@ grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) {
return subset;
}
/* Take the sub-slice [begin, end) of |source| WITHOUT taking a ref on the
   underlying refcount; the caller must keep |source| alive for the lifetime
   of the result. */
grpc_slice grpc_slice_sub_no_ref(grpc_slice source, size_t begin, size_t end) {
return sub_no_ref(source, begin, end);
}
/* Strongly-typed overload: preserves the UnmanagedMemorySlice type so
   fast-path callers keep type information without a cast. */
grpc_core::UnmanagedMemorySlice grpc_slice_sub_no_ref(
const grpc_core::UnmanagedMemorySlice& source, size_t begin, size_t end) {
return sub_no_ref(source, begin, end);
}
grpc_slice grpc_slice_sub(grpc_slice source, size_t begin, size_t end) {
grpc_slice subset;

@ -107,12 +107,10 @@ static void grow_shard(slice_shard* shard) {
shard->capacity = capacity;
}
static grpc_slice materialize(InternedSliceRefcount* s) {
grpc_slice slice;
slice.refcount = &s->base;
slice.data.refcounted.bytes = reinterpret_cast<uint8_t*>(s + 1);
slice.data.refcounted.length = s->length;
return slice;
/* Materialize a slice view over an interned string. The refcount points at
   the InternedSliceRefcount header, and the bytes live immediately after the
   header (header + payload are one allocation; see InternNewStringLocked). */
grpc_core::InternedSlice::InternedSlice(InternedSliceRefcount* s) {
refcount = &s->base;
data.refcounted.bytes = reinterpret_cast<uint8_t*>(s + 1);
data.refcounted.length = s->length;
}
uint32_t grpc_slice_default_hash_impl(grpc_slice s) {
@ -152,57 +150,150 @@ grpc_slice grpc_slice_maybe_static_intern(grpc_slice slice,
}
grpc_slice grpc_slice_intern(grpc_slice slice) {
GPR_TIMER_SCOPE("grpc_slice_intern", 0);
if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
return slice;
}
uint32_t hash = grpc_slice_hash_internal(slice);
/* TODO(arjunroy): At present, this is capable of returning either a static or
an interned slice. This yields weirdness like the constructor for
ManagedMemorySlice instantiating itself as an instance of a derived type
(StaticMetadataSlice or InternedSlice). Should reexamine. */
return grpc_core::ManagedMemorySlice(&slice);
}
// Attempt to see if the provided slice or string matches a static slice.
// SliceArgs... is either a const grpc_slice& or a string and length. In either
// case, hash is the pre-computed hash value.
//
// Returns: a matching static slice, or null.
template <class... SliceArgs>
static const grpc_core::StaticMetadataSlice* MatchStaticSlice(
uint32_t hash, SliceArgs&&... args) {
for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
static_metadata_hash_ent ent =
static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
grpc_slice_eq_static_interned(slice,
grpc_static_slice_table[ent.idx])) {
return grpc_static_slice_table[ent.idx];
grpc_static_slice_table[ent.idx].Equals(
std::forward<SliceArgs>(args)...)) {
return &grpc_static_slice_table[ent.idx];
}
}
return nullptr;
}
InternedSliceRefcount* s;
slice_shard* shard = &g_shards[SHARD_IDX(hash)];
// Helper methods to enable us to select appropriately overloaded slice methods
// whether we're dealing with a slice, or a buffer with length, when interning
// strings. Helpers for FindOrCreateInternedSlice().
// Overload-selection helpers used by the interning templates: retrieve the
// data pointer and the byte length from either a (buffer, length) pair or a
// grpc_slice. The unused parameter in each (buf, len) overload is left
// unnamed to avoid -Wunused-parameter noise.
static const void* GetBuffer(const void* buf, size_t /*len*/) { return buf; }
static size_t GetLength(const void* /*buf*/, size_t len) { return len; }
static const void* GetBuffer(const grpc_slice& slice) {
  return GRPC_SLICE_START_PTR(slice);
}
static size_t GetLength(const grpc_slice& slice) {
  return GRPC_SLICE_LENGTH(slice);
}
gpr_mu_lock(&shard->mu);
// Creates an interned slice for a string that does not currently exist in the
// intern table. SliceArgs... is either a const grpc_slice& or a string and
// length. In either case, hash is the pre-computed hash value. We must already
// hold the shard lock. Helper for FindOrCreateInternedSlice().
//
// Returns: a newly interned slice.
template <class... SliceArgs>
static InternedSliceRefcount* InternNewStringLocked(slice_shard* shard,
size_t shard_idx,
uint32_t hash,
SliceArgs&&... args) {
/* string data goes after the internal_string header */
size_t len = GetLength(std::forward<SliceArgs>(args)...);
const void* buffer = GetBuffer(std::forward<SliceArgs>(args)...);
/* Single allocation: refcount header immediately followed by the bytes. */
InternedSliceRefcount* s =
static_cast<InternedSliceRefcount*>(gpr_malloc(sizeof(*s) + len));
/* Placement-new the header, linking it ahead of the current bucket head. */
new (s) grpc_core::InternedSliceRefcount(len, hash, shard->strs[shard_idx]);
memcpy(reinterpret_cast<char*>(s + 1), buffer, len);
shard->strs[shard_idx] = s;
shard->count++;
/* Grow once the average bucket depth exceeds 2. */
if (shard->count > shard->capacity * 2) {
grow_shard(shard);
}
return s;
}
// Attempt to see if the provided slice or string matches an existing interned
// slice. SliceArgs... is either a const grpc_slice& or a string and length. In
// either case, hash is the pre-computed hash value. We must already hold the
// shard lock. Helper for FindOrCreateInternedSlice().
//
// Returns: a pre-existing matching static slice, or null.
template <class... SliceArgs>
static InternedSliceRefcount* MatchInternedSliceLocked(uint32_t hash,
size_t idx,
SliceArgs&&... args) {
InternedSliceRefcount* s;
slice_shard* shard = &g_shards[SHARD_IDX(hash)];
/* search for an existing string */
size_t idx = TABLE_IDX(hash, shard->capacity);
for (s = shard->strs[idx]; s; s = s->bucket_next) {
if (s->hash == hash &&
grpc_slice_eq_static_interned(slice, materialize(s))) {
grpc_core::InternedSlice(s).Equals(std::forward<SliceArgs>(args)...)) {
if (s->refcnt.RefIfNonZero()) {
gpr_mu_unlock(&shard->mu);
return materialize(s);
return s;
}
}
}
return nullptr;
}
/* not found: create a new string */
/* string data goes after the internal_string header */
s = static_cast<InternedSliceRefcount*>(
gpr_malloc(sizeof(*s) + GRPC_SLICE_LENGTH(slice)));
new (s) grpc_core::InternedSliceRefcount(GRPC_SLICE_LENGTH(slice), hash,
shard->strs[idx]);
memcpy(reinterpret_cast<char*>(s + 1), GRPC_SLICE_START_PTR(slice),
GRPC_SLICE_LENGTH(slice));
shard->strs[idx] = s;
shard->count++;
if (shard->count > shard->capacity * 2) {
grow_shard(shard);
// Attempt to see if the provided slice or string matches an existing interned
// slice, and failing that, create an interned slice with its contents. Returns
// either the existing matching interned slice or the newly created one.
// SliceArgs... is either a const grpc_slice& or a string and length. In either
// case, hash is the pre-computed hash value. We do not hold the shard lock
// here, but do take it.
//
// Returns: an interned slice, either pre-existing/matched or newly created.
template <class... SliceArgs>
static InternedSliceRefcount* FindOrCreateInternedSlice(uint32_t hash,
SliceArgs&&... args) {
slice_shard* shard = &g_shards[SHARD_IDX(hash)];
gpr_mu_lock(&shard->mu);
const size_t idx = TABLE_IDX(hash, shard->capacity);
InternedSliceRefcount* s =
MatchInternedSliceLocked(hash, idx, std::forward<SliceArgs>(args)...);
if (s == nullptr) {
s = InternNewStringLocked(shard, idx, hash,
std::forward<SliceArgs>(args)...);
}
gpr_mu_unlock(&shard->mu);
return materialize(s);
return s;
}
/* Intern a NUL-terminated C string; delegates to the (buf, len) overload. */
grpc_core::ManagedMemorySlice::ManagedMemorySlice(const char* string)
: grpc_core::ManagedMemorySlice::ManagedMemorySlice(string,
strlen(string)) {}
/* Intern a (buf, len) string: prefer a matching static metadata slice,
   otherwise find-or-create an entry in the intern table. */
grpc_core::ManagedMemorySlice::ManagedMemorySlice(const char* string,
size_t len) {
/* Same timer label as the legacy grpc_slice_intern() path it replaces. */
GPR_TIMER_SCOPE("grpc_slice_intern", 0);
const uint32_t hash = gpr_murmur_hash3(string, len, g_hash_seed);
const StaticMetadataSlice* static_slice = MatchStaticSlice(hash, string, len);
if (static_slice) {
*this = *static_slice;
} else {
*this =
grpc_core::InternedSlice(FindOrCreateInternedSlice(hash, string, len));
}
}
/* Intern an existing slice. Static metadata slices are already canonical, so
   they pass through without hashing or taking the shard lock. NOTE(review):
   this does not appear to consume the caller's ref on *slice_ptr — callers
   such as take_string_intern() unref afterwards; confirm before relying. */
grpc_core::ManagedMemorySlice::ManagedMemorySlice(const grpc_slice* slice_ptr) {
GPR_TIMER_SCOPE("grpc_slice_intern", 0);
const grpc_slice& slice = *slice_ptr;
if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
*this = static_cast<const grpc_core::StaticMetadataSlice&>(slice);
return;
}
const uint32_t hash = grpc_slice_hash_internal(slice);
const StaticMetadataSlice* static_slice = MatchStaticSlice(hash, slice);
if (static_slice) {
*this = *static_slice;
} else {
*this = grpc_core::InternedSlice(FindOrCreateInternedSlice(hash, slice));
}
}
void grpc_test_only_set_slice_hash_seed(uint32_t seed) {
@ -259,8 +350,8 @@ void grpc_slice_intern_shutdown(void) {
shard->count);
for (size_t j = 0; j < shard->capacity; j++) {
for (InternedSliceRefcount* s = shard->strs[j]; s; s = s->bucket_next) {
char* text =
grpc_dump_slice(materialize(s), GPR_DUMP_HEX | GPR_DUMP_ASCII);
char* text = grpc_dump_slice(grpc_core::InternedSlice(s),
GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "LEAKED: %s", text);
gpr_free(text);
}

@ -30,6 +30,7 @@
#include "src/core/lib/gpr/murmur_hash.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/slice/slice_utils.h"
#include "src/core/lib/transport/static_metadata.h"
// Interned slices have specific fast-path operations for hashing. To inline
@ -95,6 +96,8 @@ extern uint32_t g_hash_seed;
// In total, this saves us roughly 1-2% latency for unary calls, with smaller
// calls benefitting. The effect is present, but not as useful, for larger calls
// where the cost of sending the data dominates.
// TODO(arjunroy): Investigate if this can be removed with strongly typed
// grpc_slices.
struct grpc_slice_refcount {
public:
enum class Type {
@ -314,17 +317,7 @@ grpc_slice grpc_slice_from_moved_string(grpc_core::UniquePtr<char> p);
// 0. All other slices will return the size of the allocated chars.
size_t grpc_slice_memory_usage(grpc_slice s);
inline grpc_slice grpc_slice_from_static_buffer_internal(const void* s,
size_t len) {
grpc_slice slice;
slice.refcount = &grpc_core::kNoopRefcount;
slice.data.refcounted.bytes = (uint8_t*)s;
slice.data.refcounted.length = len;
return slice;
}
inline grpc_slice grpc_slice_from_static_string_internal(const char* s) {
return grpc_slice_from_static_buffer_internal(s, strlen(s));
}
grpc_core::UnmanagedMemorySlice grpc_slice_sub_no_ref(
const grpc_core::UnmanagedMemorySlice& source, size_t begin, size_t end);
#endif /* GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H */

@ -21,6 +21,8 @@
#include <grpc/support/port_platform.h>
#include <cstring>
#include <grpc/slice.h>
// When we compare two slices, and we know the latter is not inlined, we can
@ -36,6 +38,7 @@
// x86-64/clang with differs().
int grpc_slice_differs_refcounted(const grpc_slice& a,
const grpc_slice& b_not_inline);
// When we compare two slices, and we *know* that one of them is static or
// interned, we can short circuit our slice equality function. The second slice
// here must be static or interned; slice a can be any slice, inlined or not.
@ -47,4 +50,121 @@ inline bool grpc_slice_eq_static_interned(const grpc_slice& a,
return !grpc_slice_differs_refcounted(a, b_static_interned);
}
// TODO(arjunroy): These type declarations ought to be in
// src/core/lib/slice/slice_internal.h instead; they are here due to a circular
// header dependency between slice_internal.h and
// src/core/lib/transport/metadata.h. We need to fix this circular reference and
// when we do, move these type declarations.
//
// Internal slice type declarations.
// Externally, a grpc_slice is a grpc_slice is a grpc_slice.
// Internally, we may have heap allocated slices, static slices, interned
// slices, and inlined slices. If we know the specific type of slice
// we're dealing with, we can save cycles (e.g. fast-paths when we know we don't
// need to take a reference on a slice). Rather than introducing new methods
// ad-hoc in these cases, we rely on type-system backed overloads to keep
// internal APIs clean.
//
// For each overload, the definition and layout of the underlying slice does not
// change; this is purely type-system information.
namespace grpc_core {
// There are two main types of slices: those that have their memory
// managed by the slice library and those that do not.
//
// The following types of slices are not managed:
// - inlined slices (i.e., refcount is null)
// - slices that have a custom refcount type (i.e., not STATIC or INTERNED)
// - slices where the memory is managed by some external agent. The slice is not
// ref-counted by grpc, and the programmer is responsible for ensuring the
// data is valid for the duration of the period that grpc may access it.
//
// The following types of slices are managed:
// - static metadata slices (i.e., refcount type is STATIC)
// - interned slices (i.e., refcount type is INTERNED)
//
// This categorization is reflected in the following hierarchy:
//
// - grpc_slice
// > - UnmanagedMemorySlice
// > - ExternallyManagedSlice
// - ManagedMemorySlice
// > - InternedSlice
// - StaticMetadataSlice
//
// A slice whose memory is owned by the slice library (static metadata or
// interned). Layout-identical to grpc_slice: the type carries compile-time
// information only, used for overload selection on fast paths.
struct ManagedMemorySlice : public grpc_slice {
ManagedMemorySlice() {
refcount = nullptr;
data.refcounted.bytes = nullptr;
data.refcounted.length = 0;
}
// Intern a NUL-terminated string (defined in slice_intern.cc).
explicit ManagedMemorySlice(const char* string);
// Intern a (buf, len) string.
ManagedMemorySlice(const char* buf, size_t len);
// Intern an existing slice; static metadata slices pass through unchanged.
explicit ManagedMemorySlice(const grpc_slice* slice);
bool Equals(const grpc_slice& other) const {
// Managed slices are canonical: equal contents imply the same refcount,
// so a pointer compare fast-paths the equality check.
if (refcount == other.refcount) {
return true;
}
return !grpc_slice_differs_refcounted(other, *this);
}
bool Equals(const char* buf, const size_t len) const {
// A managed slice is never inlined, so data.refcounted is always valid.
return data.refcounted.length == len &&
memcmp(buf, data.refcounted.bytes, len) == 0;
}
};
// A slice whose memory is NOT managed by the slice library: inlined slices,
// plain heap-allocated slices, and (via the ExternallyManagedSlice subclass)
// memory whose lifetime the caller guarantees.
struct UnmanagedMemorySlice : public grpc_slice {
// TODO(arjunroy): Can we use a default=false param instead of this enum?
enum class ForceHeapAllocation {};
UnmanagedMemorySlice() {
// Empty inlined slice: no refcount, zero length.
refcount = nullptr;
data.inlined.length = 0;
}
// Copy a NUL-terminated string (defined in slice.cc).
explicit UnmanagedMemorySlice(const char* source);
// Copy |length| bytes starting at |source|.
UnmanagedMemorySlice(const char* source, size_t length);
// The first constructor creates a slice that may be heap allocated, or
// inlined in the slice structure if length is small enough
// (< GRPC_SLICE_INLINED_SIZE). The second constructor forces heap alloc.
explicit UnmanagedMemorySlice(size_t length);
explicit UnmanagedMemorySlice(size_t length, const ForceHeapAllocation&) {
HeapInit(length);
}
private:
// Heap-allocate storage for |length| bytes (defined in slice.cc).
void HeapInit(size_t length);
};
extern grpc_slice_refcount kNoopRefcount;
// A slice over memory owned by an external agent. Its refcount is the no-op
// kNoopRefcount, so grpc never frees the bytes: the caller must keep them
// valid for as long as grpc may access the slice.
struct ExternallyManagedSlice : public UnmanagedMemorySlice {
ExternallyManagedSlice()
: ExternallyManagedSlice(&kNoopRefcount, 0, nullptr) {}
// Wrap a NUL-terminated string (terminator excluded from the length).
explicit ExternallyManagedSlice(const char* s)
: ExternallyManagedSlice(s, strlen(s)) {}
// Wrap |len| bytes at |s| without copying.
ExternallyManagedSlice(const void* s, size_t len)
: ExternallyManagedSlice(
&kNoopRefcount, len,
reinterpret_cast<uint8_t*>(const_cast<void*>(s))) {}
// Fully-explicit form: supply the refcount, length and byte pointer.
ExternallyManagedSlice(grpc_slice_refcount* ref, size_t length,
uint8_t* bytes) {
refcount = ref;
data.refcounted.length = length;
data.refcounted.bytes = bytes;
}
};
struct StaticMetadataSlice : public ManagedMemorySlice {
StaticMetadataSlice(grpc_slice_refcount* ref, size_t length, uint8_t* bytes) {
refcount = ref;
data.refcounted.length = length;
data.refcounted.bytes = bytes;
}
};
struct InternedSliceRefcount;
// A slice backed by an InternedSliceRefcount (definition elsewhere);
// constructor is defined out of line.
struct InternedSlice : public ManagedMemorySlice {
  explicit InternedSlice(InternedSliceRefcount* s);
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_SLICE_SLICE_UTILS_H */

@ -416,13 +416,11 @@ void* grpc_channel_register_call(grpc_channel* channel, const char* method,
GPR_ASSERT(!reserved);
grpc_core::ExecCtx exec_ctx;
rc->path = grpc_mdelem_from_slices(
GRPC_MDSTR_PATH,
grpc_slice_intern(grpc_slice_from_static_string(method)));
rc->path = grpc_mdelem_from_slices(GRPC_MDSTR_PATH,
grpc_core::ManagedMemorySlice(method));
rc->authority =
host ? grpc_mdelem_from_slices(
GRPC_MDSTR_AUTHORITY,
grpc_slice_intern(grpc_slice_from_static_string(host)))
host ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
grpc_core::ManagedMemorySlice(host))
: GRPC_MDNULL;
gpr_mu_lock(&channel->registered_call_mu);
rc->next = channel->registered_calls;
@ -513,5 +511,5 @@ grpc_mdelem grpc_channel_get_reffed_status_elem_slowpath(grpc_channel* channel,
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(i, tmp);
return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
grpc_slice_from_copied_string(tmp));
grpc_core::UnmanagedMemorySlice(tmp));
}

@ -61,10 +61,10 @@ static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
char tmp[GPR_LTOA_MIN_BUFSIZE];
gpr_ltoa(chand->error_code, tmp);
calld->status.md = grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp));
GRPC_MDSTR_GRPC_STATUS, grpc_core::UnmanagedMemorySlice(tmp));
calld->details.md = grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_MESSAGE,
grpc_slice_from_copied_string(chand->error_message));
grpc_core::UnmanagedMemorySlice(chand->error_message));
calld->status.prev = calld->details.next = nullptr;
calld->status.next = &calld->details;
calld->details.prev = &calld->status;

@ -61,7 +61,7 @@ void grpc_error_get_status(grpc_error* error, grpc_millis deadline,
// 3) The resulting slice is statically known.
// 4) Said resulting slice is of length 0 ("").
// This means 3 movs, instead of 10s of instructions and a strlen.
*slice = grpc_slice_from_static_string_internal("");
*slice = grpc_core::ExternallyManagedSlice("");
}
if (http_error != nullptr) {
*http_error = GRPC_HTTP2_NO_ERROR;

@ -68,8 +68,8 @@ void grpc_mdelem_trace_ref(void* md, const grpc_slice& key,
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md, refcnt,
refcnt + 1, key_str, value_str);
"mdelem REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
refcnt, refcnt + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
@ -82,7 +82,7 @@ void grpc_mdelem_trace_unref(void* md, const grpc_slice& key,
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"ELM UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
"mdelem UNREF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", md,
refcnt, refcnt - 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
@ -112,14 +112,33 @@ AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
: RefcountedMdBase(grpc_slice_ref_internal(key),
grpc_slice_ref_internal(value)) {
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(GPR_DEBUG, "ELM ALLOC:%p:%" PRIdPTR ": '%s' = '%s'", this,
RefValue(), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
TraceAtStart("ALLOC_MD");
#endif
}
// Variant used when the key's ref is already accounted for (e.g. a static
// slice whose ref/unref is a no-op): only the value is ref'd here. The
// NoRefKey tag exists solely to select this overload.
AllocatedMetadata::AllocatedMetadata(const grpc_slice& key,
                                     const grpc_slice& value, const NoRefKey*)
    : RefcountedMdBase(key, grpc_slice_ref_internal(value)) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY");
#endif
}
// Fastpath: interned/static key plus freshly-created unmanaged value.
// Neither slice is ref'd here — the caller's refs are taken over as-is.
AllocatedMetadata::AllocatedMetadata(
    const grpc_core::ManagedMemorySlice& key,
    const grpc_core::UnmanagedMemorySlice& value)
    : RefcountedMdBase(key, value) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
#endif
}
// Fastpath: externally-managed key (ref/unref are no-ops) plus
// freshly-created unmanaged value. No refs are taken on either slice.
AllocatedMetadata::AllocatedMetadata(
    const grpc_core::ExternallyManagedSlice& key,
    const grpc_core::UnmanagedMemorySlice& value)
    : RefcountedMdBase(key, value) {
#ifndef NDEBUG
  TraceAtStart("ALLOC_MD_NOREF_KEY_VAL");
#endif
}
@ -134,6 +153,19 @@ AllocatedMetadata::~AllocatedMetadata() {
}
}
#ifndef NDEBUG
// Debug-only helper: logs this element's key/value and current refcount,
// prefixed with `tag`, when metadata tracing is enabled.
void grpc_core::RefcountedMdBase::TraceAtStart(const char* tag) {
  if (!grpc_trace_metadata.enabled()) return;
  char* key_cstr = grpc_slice_to_c_string(key());
  char* val_cstr = grpc_slice_to_c_string(value());
  gpr_log(GPR_DEBUG, "mdelem %s:%p:%" PRIdPTR ": '%s' = '%s'", tag, this,
          RefValue(), key_cstr, val_cstr);
  gpr_free(val_cstr);
  gpr_free(key_cstr);
}
#endif
InternedMetadata::InternedMetadata(const grpc_slice& key,
const grpc_slice& value, uint32_t hash,
InternedMetadata* next)
@ -141,14 +173,16 @@ InternedMetadata::InternedMetadata(const grpc_slice& key,
grpc_slice_ref_internal(value), hash),
link_(next) {
#ifndef NDEBUG
if (grpc_trace_metadata.enabled()) {
char* key_str = grpc_slice_to_c_string(key);
char* value_str = grpc_slice_to_c_string(value);
gpr_log(GPR_DEBUG, "ELM NEW:%p:%" PRIdPTR ": '%s' = '%s'", this,
RefValue(), key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
TraceAtStart("INTERNED_MD");
#endif
}
// Variant used when the key's ref is already accounted for (static or
// pre-ref'd interned slice): only the value is ref'd. NoRefKey is a tag
// parameter that selects this overload.
InternedMetadata::InternedMetadata(const grpc_slice& key,
                                   const grpc_slice& value, uint32_t hash,
                                   InternedMetadata* next, const NoRefKey*)
    : RefcountedMdBase(key, grpc_slice_ref_internal(value), hash), link_(next) {
#ifndef NDEBUG
  TraceAtStart("INTERNED_MD_NOREF_KEY");
#endif
}
@ -248,8 +282,8 @@ void InternedMetadata::RefWithShardLocked(mdtab_shard* shard) {
char* value_str = grpc_slice_to_c_string(value());
intptr_t value = RefValue();
gpr_log(__FILE__, __LINE__, GPR_LOG_SEVERITY_DEBUG,
"ELM REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", this, value,
value + 1, key_str, value_str);
"mdelem REF:%p:%" PRIdPTR "->%" PRIdPTR ": '%s' = '%s'", this,
value, value + 1, key_str, value_str);
gpr_free(key_str);
gpr_free(value_str);
}
@ -307,36 +341,100 @@ static void rehash_mdtab(mdtab_shard* shard) {
}
}
grpc_mdelem grpc_mdelem_create(
template <bool key_definitely_static, bool value_definitely_static = false>
static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
const grpc_slice& value);
template <bool key_definitely_static>
static grpc_mdelem md_create_must_intern(const grpc_slice& key,
const grpc_slice& value,
uint32_t hash);
template <bool key_definitely_static, bool value_definitely_static = false>
static grpc_mdelem md_create(
const grpc_slice& key, const grpc_slice& value,
grpc_mdelem_data* compatible_external_backing_store) {
// Ensure slices are, in fact, static if we claimed they were.
GPR_DEBUG_ASSERT(!key_definitely_static ||
GRPC_IS_STATIC_METADATA_STRING(key));
GPR_DEBUG_ASSERT(!value_definitely_static ||
GRPC_IS_STATIC_METADATA_STRING(value));
const bool key_is_interned =
key_definitely_static || grpc_slice_is_interned(key);
const bool value_is_interned =
value_definitely_static || grpc_slice_is_interned(value);
// External storage if either slice is not interned and the caller already
// created a backing store. If no backing store, we allocate one.
if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) {
if (!key_is_interned || !value_is_interned) {
if (compatible_external_backing_store != nullptr) {
// Caller provided backing store.
return GRPC_MAKE_MDELEM(compatible_external_backing_store,
GRPC_MDELEM_STORAGE_EXTERNAL);
} else {
// We allocate backing store.
return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
GRPC_MDELEM_STORAGE_ALLOCATED);
return key_definitely_static
? GRPC_MAKE_MDELEM(
grpc_core::New<AllocatedMetadata>(
key, value,
static_cast<const AllocatedMetadata::NoRefKey*>(
nullptr)),
GRPC_MDELEM_STORAGE_ALLOCATED)
: GRPC_MAKE_MDELEM(
grpc_core::New<AllocatedMetadata>(key, value),
GRPC_MDELEM_STORAGE_ALLOCATED);
}
}
return md_create_maybe_static<key_definitely_static, value_definitely_static>(
key, value);
}
template <bool key_definitely_static, bool value_definitely_static>
static grpc_mdelem md_create_maybe_static(const grpc_slice& key,
const grpc_slice& value) {
// Ensure slices are, in fact, static if we claimed they were.
GPR_DEBUG_ASSERT(!key_definitely_static ||
GRPC_IS_STATIC_METADATA_STRING(key));
GPR_DEBUG_ASSERT(!value_definitely_static ||
GRPC_IS_STATIC_METADATA_STRING(value));
GPR_DEBUG_ASSERT(key.refcount != nullptr);
GPR_DEBUG_ASSERT(value.refcount != nullptr);
const bool key_is_static_mdstr =
key_definitely_static ||
key.refcount->GetType() == grpc_slice_refcount::Type::STATIC;
const bool value_is_static_mdstr =
value_definitely_static ||
value.refcount->GetType() == grpc_slice_refcount::Type::STATIC;
const intptr_t kidx = GRPC_STATIC_METADATA_INDEX(key);
// Not all static slice input yields a statically stored metadata element.
// It may be worth documenting why.
if (GRPC_IS_STATIC_METADATA_STRING(key) &&
GRPC_IS_STATIC_METADATA_STRING(value)) {
if (key_is_static_mdstr && value_is_static_mdstr) {
grpc_mdelem static_elem = grpc_static_mdelem_for_static_strings(
GRPC_STATIC_METADATA_INDEX(key), GRPC_STATIC_METADATA_INDEX(value));
kidx, GRPC_STATIC_METADATA_INDEX(value));
if (!GRPC_MDISNULL(static_elem)) {
return static_elem;
}
}
uint32_t hash = GRPC_MDSTR_KV_HASH(grpc_slice_hash_refcounted(key),
grpc_slice_hash_refcounted(value));
uint32_t khash = key_definitely_static
? grpc_static_metadata_hash_values[kidx]
: grpc_slice_hash_refcounted(key);
uint32_t hash = GRPC_MDSTR_KV_HASH(khash, grpc_slice_hash_refcounted(value));
return md_create_must_intern<key_definitely_static>(key, value, hash);
}
template <bool key_definitely_static>
static grpc_mdelem md_create_must_intern(const grpc_slice& key,
const grpc_slice& value,
uint32_t hash) {
// Here, we know both key and value are both at least interned, and both
// possibly static. We know that anything inside the shared interned table is
// also at least interned (and maybe static). Note that equality for a static
// and interned slice implies that they are both the same exact slice.
// The same applies to a pair of interned slices, or a pair of static slices.
// Rather than run the full equality check, we can therefore just do a pointer
// comparison of the refcounts.
InternedMetadata* md;
mdtab_shard* shard = &g_shards[SHARD_IDX(hash)];
size_t idx;
@ -348,7 +446,8 @@ grpc_mdelem grpc_mdelem_create(
idx = TABLE_IDX(hash, shard->capacity);
/* search for an existing pair */
for (md = shard->elems[idx].next; md; md = md->bucket_next()) {
if (grpc_slice_eq(key, md->key()) && grpc_slice_eq(value, md->value())) {
if (grpc_slice_static_interned_equal(key, md->key()) &&
grpc_slice_static_interned_equal(value, md->value())) {
md->RefWithShardLocked(shard);
gpr_mu_unlock(&shard->mu);
return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
@ -356,8 +455,12 @@ grpc_mdelem grpc_mdelem_create(
}
/* not found: create a new pair */
md = grpc_core::New<InternedMetadata>(key, value, hash,
shard->elems[idx].next);
md = key_definitely_static
? grpc_core::New<InternedMetadata>(
key, value, hash, shard->elems[idx].next,
static_cast<const InternedMetadata::NoRefKey*>(nullptr))
: grpc_core::New<InternedMetadata>(key, value, hash,
shard->elems[idx].next);
shard->elems[idx].next = md;
shard->count++;
@ -370,9 +473,68 @@ grpc_mdelem grpc_mdelem_create(
return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
}
/* Generic entry point: nothing is assumed about either slice, so the
   slowpath template instantiation (key not known static) is used. */
grpc_mdelem grpc_mdelem_create(
    const grpc_slice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store) {
  return md_create<false>(key, value, compatible_external_backing_store);
}
/* Key is known static at compile time: instantiates the fastpath that skips
   interned-ness checks and hashing for the key. */
grpc_mdelem grpc_mdelem_create(
    const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
    grpc_mdelem_data* compatible_external_backing_store) {
  return md_create<true>(key, value, compatible_external_backing_store);
}
/* Create grpc_mdelem from provided slices. We specify via template parameter
   whether we know that the input key is static or not. If it is, we short
   circuit various comparisons and a no-op unref. */
template <bool key_definitely_static>
static grpc_mdelem md_from_slices(const grpc_slice& key,
                                  const grpc_slice& value) {
  // Ensure key is, in fact, static if we claimed it was.
  GPR_DEBUG_ASSERT(!key_definitely_static ||
                   GRPC_IS_STATIC_METADATA_STRING(key));
  grpc_mdelem out = md_create<key_definitely_static>(key, value, nullptr);
  // Per the grpc_mdelem_from_slices contract the inputs are unref'd here on
  // behalf of the caller. For a static key the unref is a no-op, so skip it.
  if (!key_definitely_static) {
    grpc_slice_unref_internal(key);
  }
  grpc_slice_unref_internal(value);
  return out;
}
/* Generic slowpath: no static-ness is assumed for the key. md_from_slices
   performs the contract-mandated unrefs on both input slices. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
                                    const grpc_slice& value) {
  // Fix: a stray leading call to grpc_mdelem_create() here computed an
  // element that was never used or unreffed — leaking a ref and doing the
  // creation work twice. Delegate directly to md_from_slices instead.
  return md_from_slices</*key_definitely_static=*/false>(key, value);
}
/* Key is known static: md_from_slices<true> skips the key unref, which would
   be a no-op for a static slice anyway. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
                                    const grpc_slice& value) {
  return md_from_slices</*key_definitely_static=*/true>(key, value);
}
grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::StaticMetadataSlice& value) {
  // Both slices are static, so there are no refs to release: go straight to
  // the maybe-static creation path and return its result directly.
  return md_create_maybe_static<true, true>(key, value);
}
/* Key is static, value is interned: only the value needs an unref. */
grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::ManagedMemorySlice& value) {
  // TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
  // creating a new interned metadata. But otherwise - we need this here.
  grpc_mdelem out = md_create_maybe_static<true>(key, value);
  grpc_slice_unref_internal(value);
  return out;
}
grpc_mdelem grpc_mdelem_from_slices(
const grpc_core::ManagedMemorySlice& key,
const grpc_core::ManagedMemorySlice& value) {
grpc_mdelem out = md_create_maybe_static<false>(key, value);
// TODO(arjunroy): We can save the unref if md_create_maybe_static ended up
// creating a new interned metadata. But otherwise - we need this here.
grpc_slice_unref_internal(key);
grpc_slice_unref_internal(value);
return out;

@ -118,10 +118,31 @@ struct grpc_mdelem {
((grpc_mdelem_data_storage)((md).payload & \
(uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
/* Unrefs the slices. */
/* Given arbitrary input slices, create a grpc_mdelem object. The caller refs
* the input slices; we unref them. This method is always safe to call; however,
* if we know data about the slices in question (e.g. if we knew our key was
* static) we can call specializations that save on cycle count. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_slice& key,
const grpc_slice& value);
/* Like grpc_mdelem_from_slices, but we know that key is a static slice. This
saves us a few branches and a no-op call to md_unref() for the key. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
const grpc_slice& value);
/* Like grpc_mdelem_from_slices, but key is static and val is static. */
grpc_mdelem grpc_mdelem_from_slices(
const grpc_core::StaticMetadataSlice& key,
const grpc_core::StaticMetadataSlice& value);
/* Like grpc_mdelem_from_slices, but key is static and val is interned. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::StaticMetadataSlice& key,
const grpc_core::ManagedMemorySlice& value);
/* Like grpc_mdelem_from_slices, but key and val are interned. */
grpc_mdelem grpc_mdelem_from_slices(const grpc_core::ManagedMemorySlice& key,
const grpc_core::ManagedMemorySlice& value);
/* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
object as backing storage (so lifetimes should align) */
grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata);
@ -134,6 +155,11 @@ grpc_mdelem grpc_mdelem_create(
const grpc_slice& key, const grpc_slice& value,
grpc_mdelem_data* compatible_external_backing_store);
/* Like grpc_mdelem_create, but we know that key is static. */
grpc_mdelem grpc_mdelem_create(
const grpc_core::StaticMetadataSlice& key, const grpc_slice& value,
grpc_mdelem_data* compatible_external_backing_store);
#define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
#define GRPC_MDVALUE(md) (GRPC_MDELEM_DATA(md)->value)
@ -239,6 +265,10 @@ class RefcountedMdBase {
}
protected:
#ifndef NDEBUG
void TraceAtStart(const char* tag);
#endif
intptr_t RefValue() { return refcnt_.Load(MemoryOrder::RELAXED); }
bool AllRefsDropped() { return refcnt_.Load(MemoryOrder::ACQUIRE) == 0; }
bool FirstRef() { return refcnt_.FetchAdd(1, MemoryOrder::RELAXED) == 0; }
@ -253,16 +283,19 @@ class RefcountedMdBase {
class InternedMetadata : public RefcountedMdBase {
public:
// TODO(arjunroy): Change to use strongly typed slices instead.
struct NoRefKey {};
struct BucketLink {
explicit BucketLink(InternedMetadata* md) : next(md) {}
InternedMetadata* next = nullptr;
};
InternedMetadata(const grpc_slice& key, const grpc_slice& value,
uint32_t hash, InternedMetadata* next);
~InternedMetadata();
InternedMetadata(const grpc_slice& key, const grpc_slice& value,
uint32_t hash, InternedMetadata* next, const NoRefKey*);
~InternedMetadata();
void RefWithShardLocked(mdtab_shard* shard);
UserData* user_data() { return &user_data_; }
InternedMetadata* bucket_next() { return link_.next; }
@ -278,7 +311,15 @@ class InternedMetadata : public RefcountedMdBase {
/* Shadow structure for grpc_mdelem_data for allocated elements */
class AllocatedMetadata : public RefcountedMdBase {
public:
// TODO(arjunroy): Change to use strongly typed slices instead.
struct NoRefKey {};
AllocatedMetadata(const grpc_slice& key, const grpc_slice& value);
AllocatedMetadata(const grpc_core::ManagedMemorySlice& key,
const grpc_core::UnmanagedMemorySlice& value);
AllocatedMetadata(const grpc_core::ExternallyManagedSlice& key,
const grpc_core::UnmanagedMemorySlice& value);
AllocatedMetadata(const grpc_slice& key, const grpc_slice& value,
const NoRefKey*);
~AllocatedMetadata();
UserData* user_data() { return &user_data_; }
@ -374,4 +415,35 @@ inline void grpc_mdelem_unref(grpc_mdelem gmd) {
void grpc_mdctx_global_init(void);
void grpc_mdctx_global_shutdown();
/* Like grpc_mdelem_from_slices, but we know that key is a static or interned
slice and value is not static or interned. This gives us an inlinable
fastpath - we know we must allocate metadata now, and that we do not need to
unref the value (rather, we just transfer the ref). We can avoid a ref since:
1) the key slice is passed in already ref'd
2) We're guaranteed to create a new Allocated slice, thus meaning the
ref can be considered 'transferred'.*/
inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::ManagedMemorySlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  // Interned/static key + non-interned value: by construction this must be an
  // allocated element, so build it directly. The value's ref is transferred
  // to the AllocatedMetadata rather than re-taken and later released.
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}
/* Same fastpath as above, for an externally-managed (no-op refcount) key. */
inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::ExternallyManagedSlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}
/* Same fastpath as above, for a compile-time static metadata key. */
inline grpc_mdelem grpc_mdelem_from_slices(
    const grpc_core::StaticMetadataSlice& key,
    const grpc_core::UnmanagedMemorySlice& value) {
  using grpc_core::AllocatedMetadata;
  return GRPC_MAKE_MDELEM(grpc_core::New<AllocatedMetadata>(key, value),
                          GRPC_MDELEM_STORAGE_ALLOCATED);
}
#endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_H */

File diff suppressed because it is too large Load Diff

@ -33,8 +33,12 @@
#include "src/core/lib/transport/metadata.h"
static_assert(
std::is_trivially_destructible<grpc_core::StaticMetadataSlice>::value,
"grpc_core::StaticMetadataSlice must be trivially destructible.");
#define GRPC_STATIC_MDSTR_COUNT 106
extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
extern const grpc_core::StaticMetadataSlice
grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];
/* ":path" */
#define GRPC_MDSTR_PATH (grpc_static_slice_table[0])
/* ":method" */

@ -94,7 +94,7 @@ void CensusClientCallData::StartTransportStreamOpBatch(
op->send_initial_metadata()->batch(), &tracing_bin_,
grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_TRACE_BIN,
grpc_slice_from_copied_buffer(tracing_buf_, tracing_len))));
grpc_core::UnmanagedMemorySlice(tracing_buf_, tracing_len))));
}
grpc_slice tags = grpc_empty_slice();
// TODO: Add in tagging serialization.

@ -155,7 +155,7 @@ void CensusServerCallData::StartTransportStreamOpBatch(
op->send_trailing_metadata()->batch(), &census_bin_,
grpc_mdelem_from_slices(
GRPC_MDSTR_GRPC_SERVER_STATS_BIN,
grpc_slice_from_copied_buffer(stats_buf_, len))));
grpc_core::UnmanagedMemorySlice(stats_buf_, len))));
}
}
// Call next op.

@ -21,6 +21,7 @@
#include <benchmark/benchmark.h>
#include <grpc/grpc.h>
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/static_metadata.h"
@ -30,7 +31,7 @@
static void BM_SliceFromStatic(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
benchmark::DoNotOptimize(grpc_slice_from_static_string("abc"));
benchmark::DoNotOptimize(grpc_core::ExternallyManagedSlice("abc"));
}
track_counters.Finish(state);
}
@ -39,7 +40,7 @@ BENCHMARK(BM_SliceFromStatic);
static void BM_SliceFromCopied(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
grpc_slice_unref(grpc_slice_from_copied_string("abc"));
grpc_slice_unref(grpc_core::UnmanagedMemorySlice("abc"));
}
track_counters.Finish(state);
}
@ -47,9 +48,9 @@ BENCHMARK(BM_SliceFromCopied);
static void BM_SliceIntern(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice slice = grpc_slice_from_static_string("abc");
grpc_core::ExternallyManagedSlice slice("abc");
while (state.KeepRunning()) {
grpc_slice_unref(grpc_slice_intern(slice));
grpc_slice_unref(grpc_core::ManagedMemorySlice(&slice));
}
track_counters.Finish(state);
}
@ -57,11 +58,11 @@ BENCHMARK(BM_SliceIntern);
static void BM_SliceReIntern(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice slice = grpc_slice_intern(grpc_slice_from_static_string("abc"));
grpc_core::ExternallyManagedSlice static_slice("abc");
grpc_core::ManagedMemorySlice slice(&static_slice);
while (state.KeepRunning()) {
grpc_slice_unref(grpc_slice_intern(slice));
grpc_slice_unref(grpc_core::ManagedMemorySlice(&slice));
}
grpc_slice_unref(slice);
track_counters.Finish(state);
}
BENCHMARK(BM_SliceReIntern);
@ -69,7 +70,7 @@ BENCHMARK(BM_SliceReIntern);
static void BM_SliceInternStaticMetadata(benchmark::State& state) {
TrackCounters track_counters;
while (state.KeepRunning()) {
grpc_slice_intern(GRPC_MDSTR_GZIP);
benchmark::DoNotOptimize(grpc_core::ManagedMemorySlice(&GRPC_MDSTR_GZIP));
}
track_counters.Finish(state);
}
@ -77,9 +78,9 @@ BENCHMARK(BM_SliceInternStaticMetadata);
static void BM_SliceInternEqualToStaticMetadata(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice slice = grpc_slice_from_static_string("gzip");
grpc_core::ExternallyManagedSlice slice("gzip");
while (state.KeepRunning()) {
grpc_slice_intern(slice);
benchmark::DoNotOptimize(grpc_core::ManagedMemorySlice(&slice));
}
track_counters.Finish(state);
}
@ -87,8 +88,8 @@ BENCHMARK(BM_SliceInternEqualToStaticMetadata);
static void BM_MetadataFromNonInternedSlices(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_from_static_string("key");
gpr_slice v = grpc_slice_from_static_string("value");
grpc_core::ExternallyManagedSlice k("key");
grpc_core::ExternallyManagedSlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
@ -100,8 +101,8 @@ BENCHMARK(BM_MetadataFromNonInternedSlices);
static void BM_MetadataFromInternedSlices(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_core::ManagedMemorySlice k("key");
grpc_core::ManagedMemorySlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
@ -116,8 +117,8 @@ BENCHMARK(BM_MetadataFromInternedSlices);
static void BM_MetadataFromInternedSlicesAlreadyInIndex(
benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_core::ManagedMemorySlice k("key");
grpc_core::ManagedMemorySlice v("value");
grpc_core::ExecCtx exec_ctx;
grpc_mdelem seed = grpc_mdelem_create(k, v, nullptr);
while (state.KeepRunning()) {
@ -133,8 +134,8 @@ BENCHMARK(BM_MetadataFromInternedSlicesAlreadyInIndex);
static void BM_MetadataFromInternedKey(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_from_static_string("value");
grpc_core::ManagedMemorySlice k("key");
grpc_core::ExternallyManagedSlice v("value");
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
@ -148,8 +149,8 @@ BENCHMARK(BM_MetadataFromInternedKey);
static void BM_MetadataFromNonInternedSlicesWithBackingStore(
benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_from_static_string("key");
gpr_slice v = grpc_slice_from_static_string("value");
grpc_core::ExternallyManagedSlice k("key");
grpc_core::ExternallyManagedSlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
@ -164,8 +165,8 @@ BENCHMARK(BM_MetadataFromNonInternedSlicesWithBackingStore);
static void BM_MetadataFromInternedSlicesWithBackingStore(
benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_core::ManagedMemorySlice k("key");
grpc_core::ManagedMemorySlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
@ -182,8 +183,8 @@ BENCHMARK(BM_MetadataFromInternedSlicesWithBackingStore);
static void BM_MetadataFromInternedKeyWithBackingStore(
benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_from_static_string("value");
grpc_core::ManagedMemorySlice k("key");
grpc_core::ExternallyManagedSlice v("value");
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
@ -198,14 +199,12 @@ BENCHMARK(BM_MetadataFromInternedKeyWithBackingStore);
static void BM_MetadataFromStaticMetadataStrings(benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = GRPC_MDSTR_STATUS;
gpr_slice v = GRPC_MDSTR_200;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
GRPC_MDELEM_UNREF(
grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr));
}
grpc_slice_unref(k);
track_counters.Finish(state);
}
BENCHMARK(BM_MetadataFromStaticMetadataStrings);
@ -213,14 +212,12 @@ BENCHMARK(BM_MetadataFromStaticMetadataStrings);
static void BM_MetadataFromStaticMetadataStringsNotIndexed(
benchmark::State& state) {
TrackCounters track_counters;
gpr_slice k = GRPC_MDSTR_STATUS;
gpr_slice v = GRPC_MDSTR_GZIP;
grpc_core::ExecCtx exec_ctx;
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
GRPC_MDELEM_UNREF(
grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_GZIP, nullptr));
}
grpc_slice_unref(k);
track_counters.Finish(state);
}
BENCHMARK(BM_MetadataFromStaticMetadataStringsNotIndexed);
@ -229,9 +226,10 @@ static void BM_MetadataRefUnrefExternal(benchmark::State& state) {
TrackCounters track_counters;
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
grpc_mdelem el = grpc_mdelem_create(
grpc_slice_from_static_string("a"), grpc_slice_from_static_string("b"),
reinterpret_cast<grpc_mdelem_data*>(backing_store));
grpc_mdelem el =
grpc_mdelem_create(grpc_core::ExternallyManagedSlice("a"),
grpc_core::ExternallyManagedSlice("b"),
reinterpret_cast<grpc_mdelem_data*>(backing_store));
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}
@ -245,8 +243,8 @@ static void BM_MetadataRefUnrefInterned(benchmark::State& state) {
TrackCounters track_counters;
char backing_store[sizeof(grpc_mdelem_data)];
grpc_core::ExecCtx exec_ctx;
gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
grpc_core::ManagedMemorySlice k("key");
grpc_core::ManagedMemorySlice v("value");
grpc_mdelem el = grpc_mdelem_create(
k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store));
grpc_slice_unref(k);
@ -264,8 +262,8 @@ static void BM_MetadataRefUnrefAllocated(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
grpc_mdelem el =
grpc_mdelem_create(grpc_slice_from_static_string("a"),
grpc_slice_from_static_string("b"), nullptr);
grpc_mdelem_create(grpc_core::ExternallyManagedSlice("a"),
grpc_core::ExternallyManagedSlice("b"), nullptr);
while (state.KeepRunning()) {
GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
}

@ -392,16 +392,21 @@ for i, elem in enumerate(all_strs):
def slice_def(i):
return ('{&grpc_static_metadata_refcounts[%d],'
' {{%d, g_bytes+%d}}}') % (i, len(all_strs[i]), id2strofs[i])
return (
'grpc_core::StaticMetadataSlice(&grpc_static_metadata_refcounts[%d], %d, g_bytes+%d)'
) % (i, len(all_strs[i]), id2strofs[i])
# validate configuration
for elem in METADATA_BATCH_CALLOUTS:
assert elem in all_strs
static_slice_dest_assert = (
'static_assert(std::is_trivially_destructible' +
'<grpc_core::StaticMetadataSlice>::value, '
'"grpc_core::StaticMetadataSlice must be trivially destructible.");')
print >> H, static_slice_dest_assert
print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
print >> H, ('extern const grpc_slice '
print >> H, ('extern const grpc_core::StaticMetadataSlice '
'grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];')
for i, elem in enumerate(all_strs):
print >> H, '/* "%s" */' % elem
@ -425,8 +430,9 @@ print >> H, '#define GRPC_IS_STATIC_METADATA_STRING(slice) \\'
print >> H, (' ((slice).refcount != NULL && (slice).refcount->GetType() == '
'grpc_slice_refcount::Type::STATIC)')
print >> H
print >> C, ('const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]'
' = {')
print >> C, (
'const grpc_core::StaticMetadataSlice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]'
' = {')
for i, elem in enumerate(all_strs):
print >> C, slice_def(i) + ','
print >> C, '};'

Loading…
Cancel
Save