Merge pull request #20353 from arjunroy/hpack_encoder_refactor_only

Refactor hpack encoder to be smaller footprint.
Arjun Roy 5 years ago committed by GitHub
commit 2486b0a2b0
Files changed:
1. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (414 changed lines)
2. src/core/ext/transport/chttp2/transport/hpack_encoder.h (36 changed lines)

src/core/ext/transport/chttp2/transport/hpack_encoder.cc

@@ -42,6 +42,20 @@
#include "src/core/lib/transport/static_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
namespace {
/* (Maybe-cuckoo) hpack encoder hash table implementation.
This hashtable implementation is a subset of a proper cuckoo hash; while we
have fallback cells that a value can be hashed to if the first cell is full,
we do not attempt to iteratively rearrange entries into backup cells to get
things to fit. Instead, if both a cell and the backup cell for a value are
occupied, the older existing entry is evicted.
Note that we can disable backup-cell picking by setting
GRPC_HPACK_ENCODER_USE_CUCKOO_HASH to 0. In that case, we simply evict an
existing entry rather than try to use a backup. Hence, "maybe-cuckoo."
TODO(arjunroy): Add unit tests for hashtable implementation. */
#define GRPC_HPACK_ENCODER_USE_CUCKOO_HASH 1
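/* Editor's note: the block below is an illustrative, standalone sketch of the
   "maybe-cuckoo" policy described in the comment above. It uses a toy table of
   ints; the names (ToySlot, ToyInsert, kToySlots, ...) are invented for the
   example and do not appear anywhere in this PR. */
#include <cstdint>
#include <cstdio>

struct ToySlot {
  int key = 0;         // 0 means "empty" in this toy example
  uint32_t index = 0;  // monotonically increasing "age" of the entry
};

constexpr uint32_t kToySlots = 8;
static ToySlot toy_table[kToySlots];

// Two candidate cells per key, analogous to HASH_FRAGMENT_2/HASH_FRAGMENT_3.
static uint32_t ToyFirstSlot(int key) {
  return static_cast<uint32_t>(key) % kToySlots;
}
static uint32_t ToySecondSlot(int key) {
  return (static_cast<uint32_t>(key) / kToySlots) % kToySlots;
}

// Insert or update 'key' with a new index. If both candidate cells hold other
// keys, evict whichever holds the older (smaller) index; no iterative
// rearrangement is attempted -- hence "maybe-cuckoo".
static void ToyInsert(int key, uint32_t new_index) {
  const uint32_t a = ToyFirstSlot(key);
  const uint32_t b = ToySecondSlot(key);
  uint32_t target;
  if (toy_table[a].key == key || toy_table[a].key == 0) {
    target = a;  // hit, or the first cell is free
  } else if (toy_table[b].key == key || toy_table[b].key == 0) {
    target = b;  // hit, or the backup cell is free
  } else {
    target = toy_table[a].index < toy_table[b].index ? a : b;  // evict older
  }
  toy_table[target].key = key;
  toy_table[target].index = new_index;
}

int main() {
  ToyInsert(17, 1);  // lands in cell 1
  ToyInsert(25, 2);  // cell 1 taken, backup cell 3 is free
  ToyInsert(89, 3);  // cells 1 and 3 both taken: evicts 17 (older index)
  for (uint32_t i = 0; i < kToySlots; i++) {
    if (toy_table[i].key != 0) {
      printf("slot %u: key=%d index=%u\n", i, toy_table[i].key,
             toy_table[i].index);
    }
  }
  return 0;
}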
#define HASH_FRAGMENT_MASK (GRPC_CHTTP2_HPACKC_NUM_VALUES - 1)
#define HASH_FRAGMENT_1(x) ((x)&HASH_FRAGMENT_MASK)
#define HASH_FRAGMENT_2(x) \
@@ -51,21 +65,200 @@
#define HASH_FRAGMENT_4(x) \
  (((x) >> (GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS * 3)) & HASH_FRAGMENT_MASK)
/* don't consider adding anything bigger than this to the hpack table */
constexpr size_t kMaxDecoderSpaceUsage = 512;
constexpr size_t kDataFrameHeaderSize = 9;
constexpr uint8_t kMaxFilterValue = 255;

/* if the probability of this item being seen again is < 1/x then don't add
   it to the table */
#define ONE_ON_ADD_PROBABILITY (GRPC_CHTTP2_HPACKC_NUM_VALUES >> 1)

/* The hpack index we encode over the wire. Meaningful to the hpack encoder and
   parser on the remote end as well as HTTP2. *Not* the same as
   HpackEncoderSlotHash, which is only meaningful to the hpack encoder
   implementation (HpackEncoderSlotHash is used for the hashtable implementation
   when mapping from metadata to HpackEncoderIndex). */
typedef uint32_t HpackEncoderIndex;
/* Internal-table bookkeeping (*not* the hpack index). */
typedef uint32_t HpackEncoderSlotHash;
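/* Editor's note: an illustrative sketch of how one 32-bit hash is carved into
   the four slot indices used by HASH_FRAGMENT_1..4 above. The table size of 64
   (6 bits) is assumed for illustration only; the real values come from
   GRPC_CHTTP2_HPACKC_NUM_VALUES / GRPC_CHTTP2_HPACKC_NUM_VALUES_BITS. */
#include <cstdint>
#include <cstdio>

constexpr uint32_t kNumValuesBits = 6;                  // assumed
constexpr uint32_t kNumValues = 1u << kNumValuesBits;   // 64 cells per table
constexpr uint32_t kFragmentMask = kNumValues - 1;

int main() {
  const uint32_t value_hash = 0x9E3779B9u;  // any 32-bit hash
  // Fragment N uses bits [(N-1)*bits, N*bits) of the hash, mirroring
  // HASH_FRAGMENT_1..4.
  const uint32_t frag1 = value_hash & kFragmentMask;
  const uint32_t frag2 = (value_hash >> kNumValuesBits) & kFragmentMask;
  const uint32_t frag3 = (value_hash >> (kNumValuesBits * 2)) & kFragmentMask;
  const uint32_t frag4 = (value_hash >> (kNumValuesBits * 3)) & kFragmentMask;
  // In this file, fragment 1 indexes the popularity filter while fragments 2
  // and 3 pick the two "maybe-cuckoo" cells.
  printf("fragments: %u %u %u %u\n", frag1, frag2, frag3, frag4);
  return 0;
}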
struct SliceRefComparator {
  typedef grpc_slice_refcount* Type;
  static grpc_slice_refcount* Null() { return nullptr; }
  static bool IsNull(const grpc_slice_refcount* sref) {
    return sref == nullptr;
  }
  static bool Equals(const grpc_slice_refcount* s1,
                     const grpc_slice_refcount* s2) {
    return s1 == s2;
  }
  static void Ref(grpc_slice_refcount* sref) {
    GPR_DEBUG_ASSERT(sref != nullptr);
    sref->Ref();
  }
  static void Unref(grpc_slice_refcount* sref) {
    GPR_DEBUG_ASSERT(sref != nullptr);
    sref->Unref();
  }
};
struct MetadataComparator {
  typedef grpc_mdelem Type;
  static const grpc_mdelem Null() { return {0}; }
  static bool IsNull(const grpc_mdelem md) { return md.payload == 0; }
  static bool Equals(const grpc_mdelem md1, const grpc_mdelem md2) {
    return md1.payload == md2.payload;
  }
  static void Ref(grpc_mdelem md) {
    GPR_DEBUG_ASSERT(md.payload != 0);
    GRPC_MDELEM_REF(md);
  }
  static void Unref(grpc_mdelem md) {
    GPR_DEBUG_ASSERT(md.payload != 0);
    GRPC_MDELEM_UNREF(md);
  }
};
/* Index table management */
template <typename Hashtable>
static HpackEncoderIndex HpackIndex(const Hashtable* hashtable,
                                    HpackEncoderSlotHash hash_index) {
  return hashtable[hash_index].index;
}

template <typename ValueType, typename Hashtable>
static const ValueType& GetEntry(const Hashtable* hashtable,
                                 HpackEncoderSlotHash hash_index) {
  return hashtable[hash_index].value;
}

template <typename Cmp, typename Hashtable>
static bool TableEmptyAt(const Hashtable* hashtable,
                         HpackEncoderSlotHash hash_index) {
  return Cmp::Equals(hashtable[hash_index].value, Cmp::Null());
}

template <typename Cmp, typename Hashtable, typename ValueType>
static bool Matches(const Hashtable* hashtable, const ValueType& value,
                    HpackEncoderSlotHash hash_index) {
  return Cmp::Equals(value, hashtable[hash_index].value);
}

template <typename Hashtable>
static void UpdateIndex(Hashtable* hashtable, HpackEncoderSlotHash hash_index,
                        HpackEncoderIndex hpack_index) {
  hashtable[hash_index].index = hpack_index;
}

template <typename Hashtable, typename ValueType>
static void SetIndex(Hashtable* hashtable, HpackEncoderSlotHash hash_index,
                     const ValueType& value, HpackEncoderIndex hpack_index) {
  hashtable[hash_index].value = value;
  UpdateIndex(hashtable, hash_index, hpack_index);
}

template <typename Cmp, typename Hashtable, typename ValueType>
static bool GetMatchingIndex(Hashtable* hashtable, const ValueType& value,
                             uint32_t value_hash, HpackEncoderIndex* index) {
  const HpackEncoderSlotHash cuckoo_first = HASH_FRAGMENT_2(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_first)) {
    *index = HpackIndex(hashtable, cuckoo_first);
    return true;
  }

#if GRPC_HPACK_ENCODER_USE_CUCKOO_HASH
  const HpackEncoderSlotHash cuckoo_second = HASH_FRAGMENT_3(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_second)) {
    *index = HpackIndex(hashtable, cuckoo_second);
    return true;
  }
#endif

  return false;
}

template <typename Cmp, typename Hashtable, typename ValueType>
static ValueType ReplaceOlderIndex(Hashtable* hashtable, const ValueType& value,
                                   HpackEncoderSlotHash hash_index_a,
                                   HpackEncoderSlotHash hash_index_b,
                                   HpackEncoderIndex new_index) {
  const HpackEncoderIndex hpack_idx_a = hashtable[hash_index_a].index;
  const HpackEncoderIndex hpack_idx_b = hashtable[hash_index_b].index;
  const HpackEncoderSlotHash id =
      hpack_idx_a < hpack_idx_b ? hash_index_a : hash_index_b;
  ValueType old = GetEntry<typename Cmp::Type>(hashtable, id);
  SetIndex(hashtable, id, value, new_index);
  return old;
}

template <typename Cmp, typename Hashtable, typename ValueType>
static void UpdateAddOrEvict(Hashtable hashtable, const ValueType& value,
                             uint32_t value_hash, HpackEncoderIndex new_index) {
  const HpackEncoderSlotHash cuckoo_first = HASH_FRAGMENT_2(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_first)) {
    UpdateIndex(hashtable, cuckoo_first, new_index);
    return;
  }
  if (TableEmptyAt<Cmp>(hashtable, cuckoo_first)) {
    Cmp::Ref(value);
    SetIndex(hashtable, cuckoo_first, value, new_index);
    return;
  }

#if GRPC_HPACK_ENCODER_USE_CUCKOO_HASH
  const HpackEncoderSlotHash cuckoo_second = HASH_FRAGMENT_3(value_hash);
  if (Matches<Cmp>(hashtable, value, cuckoo_second)) {
    UpdateIndex(hashtable, cuckoo_second, new_index);
    return;
  }
  Cmp::Ref(value);
  if (TableEmptyAt<Cmp>(hashtable, cuckoo_second)) {
    SetIndex(hashtable, cuckoo_second, value, new_index);
    return;
  }
  Cmp::Unref(ReplaceOlderIndex<Cmp>(hashtable, value, cuckoo_first,
                                    cuckoo_second, new_index));
#else
  ValueType old = GetEntry<typename Cmp::Type>(hashtable, cuckoo_first);
  SetIndex(hashtable, cuckoo_first, value, new_index);
  Cmp::Unref(old);
#endif
}

/* halve all counts because an element reached max */
static void HalveFilter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
  *sum = 0;
  for (int i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
    elems[i] /= 2;
    (*sum) += elems[i];
  }
}

/* increment a filter count, halve all counts if one element reaches max */
static void IncrementFilter(uint8_t idx, uint32_t* sum, uint8_t* elems) {
  elems[idx]++;
  if (GPR_LIKELY(elems[idx] < kMaxFilterValue)) {
    (*sum)++;
  } else {
    HalveFilter(idx, sum, elems);
  }
}

static uint32_t UpdateHashtablePopularity(
    grpc_chttp2_hpack_compressor* hpack_compressor, uint32_t elem_hash) {
  const uint32_t popularity_hash = HASH_FRAGMENT_1(elem_hash);
  IncrementFilter(popularity_hash, &hpack_compressor->filter_elems_sum,
                  hpack_compressor->filter_elems);
  return popularity_hash;
}

static bool CanAddToHashtable(grpc_chttp2_hpack_compressor* hpack_compressor,
                              uint32_t popularity_hash) {
  const bool can_add =
      hpack_compressor->filter_elems[popularity_hash] >=
      hpack_compressor->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
  return can_add;
}
} /* namespace */
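/* Editor's note: a standalone sketch of the popularity filter behind
   IncrementFilter/CanAddToHashtable above. kNumValues = 64 is assumed for
   illustration; the real table size is GRPC_CHTTP2_HPACKC_NUM_VALUES. This is
   not part of this change. */
#include <cstdint>
#include <cstdio>

constexpr uint32_t kNumValues = 64;                         // assumed
constexpr uint32_t kOneOnAddProbability = kNumValues >> 1;  // mirrors ONE_ON_ADD_PROBABILITY
constexpr uint8_t kMaxCount = 255;

static uint8_t counts[kNumValues];
static uint32_t counts_sum;

// Same shape as IncrementFilter/HalveFilter: bump one bucket and, when it
// saturates, halve every bucket so relative popularity is preserved.
static void Bump(uint32_t bucket) {
  counts[bucket]++;
  if (counts[bucket] < kMaxCount) {
    counts_sum++;
  } else {
    counts_sum = 0;
    for (uint32_t i = 0; i < kNumValues; i++) {
      counts[i] /= 2;
      counts_sum += counts[i];
    }
  }
}

// "Add to the table only if this bucket accounts for at least a
// 1/kOneOnAddProbability share of everything seen so far."
static bool CanAdd(uint32_t bucket) {
  return counts[bucket] >= counts_sum / kOneOnAddProbability;
}

int main() {
  // Spread 960 sightings evenly, then hit bucket 7 forty more times.
  // Bucket 7 ends at 55 of 1000 total; 55 >= 1000/32 (= 31), so it qualifies.
  for (int i = 0; i < 960; i++) Bump(static_cast<uint32_t>(i) % kNumValues);
  for (int i = 0; i < 40; i++) Bump(7);
  printf("bucket 7: count=%u sum=%u can_add=%d\n",
         static_cast<unsigned>(counts[7]), counts_sum, CanAdd(7));
  return 0;
}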
typedef struct {
  int is_first_frame;
  /* number of bytes in 'output' when we started the frame - used to calculate
@@ -73,8 +266,10 @@
  size_t output_length_at_start_of_frame;
  /* index (in output) of the header for the current frame */
  size_t header_idx;
#ifndef NDEBUG
  /* have we seen a regular (non-colon-prefixed) header yet? */
  uint8_t seen_regular_header;
#endif
  /* output stream id */
  uint32_t stream_id;
  grpc_slice_buffer* output;
@@ -84,7 +279,7 @@ typedef struct {
  bool use_true_binary_metadata;
} framer_state;
/* fills p (which is expected to be kDataFrameHeaderSize bytes long)
 * with a data frame header */
static void fill_header(uint8_t* p, uint8_t type, uint32_t id, size_t len,
                        uint8_t flags) {
@@ -131,7 +326,7 @@ static void finish_frame(framer_state* st, int is_header_boundary,
      static_cast<uint8_t>(
          (is_last_in_stream ? GRPC_CHTTP2_DATA_FLAG_END_STREAM : 0) |
          (is_header_boundary ? GRPC_CHTTP2_DATA_FLAG_END_HEADERS : 0)));
  st->stats->framing_bytes += kDataFrameHeaderSize;
  st->is_first_frame = 0;
}
@@ -140,7 +335,7 @@ static void finish_frame(framer_state* st, int is_header_boundary,
static void begin_frame(framer_state* st) {
  grpc_slice reserved;
  reserved.refcount = nullptr;
  reserved.data.inlined.length = kDataFrameHeaderSize;
  st->header_idx = grpc_slice_buffer_add_indexed(st->output, reserved);
  st->output_length_at_start_of_frame = st->output->length;
}
@@ -156,21 +351,6 @@ static void ensure_space(framer_state* st, size_t need_bytes) {
  begin_frame(st);
}
static void add_header_data(framer_state* st, grpc_slice slice) {
  size_t len = GRPC_SLICE_LENGTH(slice);
  size_t remaining;
@@ -228,7 +408,6 @@ static uint32_t prepare_space_for_new_elem(grpc_chttp2_hpack_compressor* c,
  while (c->table_size + elem_size > c->max_table_size) {
    evict_entry(c);
  }
  GPR_ASSERT(c->table_elems < c->max_table_size);
  c->table_elem_size[new_index % c->cap_table_elems] =
      static_cast<uint16_t>(elem_size);
@@ -240,97 +419,37 @@
// Add a key to the dynamic table. Both key and value will be added to table at
// the decoder.
static void AddKeyWithIndex(grpc_chttp2_hpack_compressor* c,
                            grpc_slice_refcount* key_ref, uint32_t new_index,
                            uint32_t key_hash) {
  UpdateAddOrEvict<SliceRefComparator>(c->key_table.entries, key_ref, key_hash,
                                       new_index);
}

/* add an element to the decoder table */
static void AddElemWithIndex(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                             uint32_t new_index, uint32_t elem_hash,
                             uint32_t key_hash) {
  GPR_DEBUG_ASSERT(GRPC_MDELEM_IS_INTERNED(elem));
  UpdateAddOrEvict<MetadataComparator>(c->elem_table.entries, elem, elem_hash,
                                       new_index);
  AddKeyWithIndex(c, GRPC_MDKEY(elem).refcount, new_index, key_hash);
}

static void add_elem(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                     size_t elem_size, uint32_t elem_hash, uint32_t key_hash) {
  uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
  if (new_index != 0) {
    AddElemWithIndex(c, elem, new_index, elem_hash, key_hash);
  }
}

static void add_key(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                    size_t elem_size, uint32_t key_hash) {
  uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
  if (new_index != 0) {
    AddKeyWithIndex(c, GRPC_MDKEY(elem).refcount, new_index, key_hash);
  }
}
static void emit_indexed(grpc_chttp2_hpack_compressor* c, uint32_t elem_index,
@@ -516,30 +635,19 @@ static EmitIndexedStatus maybe_emit_indexed(grpc_chttp2_hpack_compressor* c,
            ->hash()
      : reinterpret_cast<grpc_core::StaticMetadata*>(GRPC_MDELEM_DATA(elem))
            ->hash();

  /* Update filter to see if we can perhaps add this elem. */
  const uint32_t popularity_hash = UpdateHashtablePopularity(c, elem_hash);

  /* is this elem currently in the decoders table? */
  HpackEncoderIndex indices_key;
  if (GetMatchingIndex<MetadataComparator>(c->elem_table.entries, elem,
                                           elem_hash, &indices_key) &&
      indices_key > c->tail_remote_index) {
    emit_indexed(c, dynidx(c, indices_key), st);
    return EmitIndexedStatus(elem_hash, true, false);
  }

  /* Didn't hit either cuckoo index, so no emit. */
  return EmitIndexedStatus(elem_hash, false,
                           CanAddToHashtable(c, popularity_hash));
}
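/* Editor's note: an illustration of the wire format emit_indexed() produces:
   an HPACK "Indexed Header Field" (RFC 7541, sections 5.1 and 6.1), i.e. the
   table index as a prefix-coded integer with the top bit of the first octet
   set. This is a generic encoder, not gRPC's emit_indexed. */
#include <cstdint>
#include <cstdio>
#include <vector>

// Encode 'value' with an N-bit prefix; the first octet is OR'd with
// 'first_byte_bits' (0x80 marks an indexed header field).
static void EncodePrefixedInt(uint32_t value, uint8_t prefix_bits,
                              uint8_t first_byte_bits,
                              std::vector<uint8_t>* out) {
  const uint32_t max_prefix = (1u << prefix_bits) - 1;
  if (value < max_prefix) {
    out->push_back(static_cast<uint8_t>(first_byte_bits | value));
    return;
  }
  out->push_back(static_cast<uint8_t>(first_byte_bits | max_prefix));
  value -= max_prefix;
  while (value >= 128) {
    out->push_back(static_cast<uint8_t>((value & 0x7f) | 0x80));
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

int main() {
  std::vector<uint8_t> out;
  // Index 62 addresses the most recently added dynamic-table entry
  // (the HPACK static table occupies indices 1-61).
  EncodePrefixedInt(/*value=*/62, /*prefix_bits=*/7, /*first_byte_bits=*/0x80,
                    &out);
  for (uint8_t b : out) printf("%02x ", b);  // prints: be
  printf("\n");
  return 0;
}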
static void emit_maybe_add(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
@@ -557,14 +665,15 @@ static void emit_maybe_add(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
/* encode an mdelem */
static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                      framer_state* st) {
  const grpc_slice& elem_key = GRPC_MDKEY(elem);
  /* User-provided key len validated in grpc_validate_header_key_is_legal(). */
  GPR_DEBUG_ASSERT(GRPC_SLICE_LENGTH(elem_key) > 0);
  /* Header ordering: all reserved headers (prefixed with ':') must precede
   * regular headers. This can be a debug assert, since:
   * 1) User cannot give us ':' headers (grpc_validate_header_key_is_legal()).
   * 2) grpc filters/core should be checked during debug builds. */
#ifndef NDEBUG
  if (GRPC_SLICE_START_PTR(elem_key)[0] != ':') { /* regular header */
    st->seen_regular_header = 1;
  } else {
    GPR_DEBUG_ASSERT(
@@ -575,11 +684,8 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
  if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
    hpack_enc_log(elem);
  }
  const bool elem_interned = GRPC_MDELEM_IS_INTERNED(elem);
  const bool key_interned = elem_interned || grpc_slice_is_interned(elem_key);

  /* Key is not interned, emit literals. */
  if (!key_interned) {
    emit_lithdr_v<EmitLitHdrVType::NO_IDX_V>(c, elem, st);
@@ -591,38 +697,24 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
  if (ret.emitted) {
    return;
  }

  /* should this elem be in the table? */
  const size_t decoder_space_usage =
      grpc_chttp2_get_size_in_hpack_table(elem, st->use_true_binary_metadata);
  const bool decoder_space_available =
      decoder_space_usage < kMaxDecoderSpaceUsage;
  const bool should_add_elem =
      elem_interned && decoder_space_available && ret.can_add;
  const uint32_t elem_hash = ret.elem_hash;

  /* no hits for the elem... maybe there's a key? */
  const uint32_t key_hash = elem_key.refcount->Hash(elem_key);
  HpackEncoderIndex indices_key;
  if (GetMatchingIndex<SliceRefComparator>(
          c->key_table.entries, elem_key.refcount, key_hash, &indices_key) &&
      indices_key > c->tail_remote_index) {
    emit_maybe_add(c, elem, st, indices_key, should_add_elem,
                   decoder_space_usage, elem_hash, key_hash);
    return;
  }

  /* no elem, key in the table... fall back to literal emission */
  const bool should_add_key = !elem_interned && decoder_space_available;
  if (should_add_elem || should_add_key) {
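/* Editor's note: an illustration of the "should this elem be in the table?"
   size check above. Per RFC 7541 section 4.1, a dynamic-table entry costs
   name length + value length + 32 octets of overhead, and entries at or above
   kMaxDecoderSpaceUsage (512) are never added. The exact accounting done by
   grpc_chttp2_get_size_in_hpack_table (e.g. for true-binary metadata) is not
   shown in this hunk, so this is a simplified sketch with made-up header
   values. */
#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr size_t kEntryOverhead = 32;     // RFC 7541 section 4.1
constexpr size_t kMaxDecoderSpace = 512;  // mirrors kMaxDecoderSpaceUsage

static size_t EntrySize(const char* key, const char* value) {
  return strlen(key) + strlen(value) + kEntryOverhead;
}

int main() {
  // "user-agent: grpc-c++/1.25.0" -> 10 + 15 + 32 = 57 octets: cheap to index.
  printf("user-agent entry: %zu octets\n",
         EntrySize("user-agent", "grpc-c++/1.25.0"));
  // A 600-byte value blows past the 512 cap, so such an element is emitted as
  // a literal and never added to the dynamic table.
  char big_value[601];
  memset(big_value, 'x', 600);
  big_value[600] = '\0';
  printf("oversized entry: %zu octets (cap %zu)\n",
         EntrySize("x-trace-context", big_value), kMaxDecoderSpace);
  return 0;
}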
@@ -660,22 +752,18 @@ void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c) {
  c->cap_table_elems = elems_for_bytes(c->max_table_size);
  c->max_table_elems = c->cap_table_elems;
  c->max_usable_size = GRPC_CHTTP2_HPACKC_INITIAL_TABLE_SIZE;
  const size_t alloc_size = sizeof(*c->table_elem_size) * c->cap_table_elems;
  c->table_elem_size = static_cast<uint16_t*>(gpr_malloc(alloc_size));
  memset(c->table_elem_size, 0, alloc_size);
}

void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c) {
  for (int i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
    auto* const key = GetEntry<grpc_slice_refcount*>(c->key_table.entries, i);
    if (key != nullptr) {
      key->Unref();
    }
    GRPC_MDELEM_UNREF(GetEntry<grpc_mdelem>(c->elem_table.entries, i));
  }
  gpr_free(c->table_elem_size);
}
@@ -744,7 +832,9 @@ void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,
     validates that stream_id is not 0. So, this can be a debug assert. */
  GPR_DEBUG_ASSERT(options->stream_id != 0);
  framer_state st;
#ifndef NDEBUG
  st.seen_regular_header = 0;
#endif
  st.stream_id = options->stream_id;
  st.output = outbuf;
  st.is_first_frame = 1;

src/core/ext/transport/chttp2/transport/hpack_encoder.h

@@ -38,14 +38,10 @@
extern grpc_core::TraceFlag grpc_http_trace;

struct grpc_chttp2_hpack_compressor {
  uint32_t max_table_size;
  uint32_t max_table_elems;
  uint32_t cap_table_elems;
  /** maximum number of bytes we'll use for the decode table (to guard against
      peers ooming us by setting decode table size high) */
  uint32_t max_usable_size;
@@ -53,23 +49,39 @@ typedef struct {
  uint32_t tail_remote_index;
  uint32_t table_size;
  uint32_t table_elems;
  uint16_t* table_elem_size;
  /** if non-zero, advertise to the decoder that we'll start using a table
      of this size */
  uint8_t advertise_table_size_change;
  /* filter tables for elems: this tables provides an approximate
     popularity count for particular hashes, and are used to determine whether
     a new literal should be added to the compression table or not.
     They track a single integer that counts how often a particular value has
     been seen. When that count reaches max (255), all values are halved. */
  uint32_t filter_elems_sum;
  uint8_t filter_elems[GRPC_CHTTP2_HPACKC_NUM_VALUES];

  /* entry tables for keys & elems: these tables track values that have been
     seen and *may* be in the decompressor table */
  struct {
    struct {
      grpc_mdelem value;
      uint32_t index;
    } entries[GRPC_CHTTP2_HPACKC_NUM_VALUES];
  } elem_table; /* Metadata table management */

  struct {
    struct {
      /* Only store the slice refcount - we do not need the byte buffer or
         length of the slice since we only need to store a mapping between the
         identity of the slice and the corresponding HPACK index. Since the
         slice *must* be static or interned, the refcount is sufficient to
         establish identity. */
      grpc_slice_refcount* value;
      uint32_t index;
    } entries[GRPC_CHTTP2_HPACKC_NUM_VALUES];
  } key_table; /* Key table management */
};
void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c);
void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c);
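/* Editor's note: the advertise_table_size_change field above records that the
   encoder still needs to announce a new table size to its peer. On the wire
   that announcement is an HPACK "Dynamic Table Size Update" (RFC 7541,
   section 6.3): a 0b001 bit pattern followed by the new maximum size as a
   5-bit-prefix integer. The sketch below shows that encoding only; it is not
   gRPC's emitter. */
#include <cstdint>
#include <cstdio>
#include <vector>

static void EncodeTableSizeUpdate(uint32_t new_size,
                                  std::vector<uint8_t>* out) {
  constexpr uint32_t kMaxPrefix = 31;  // 5-bit prefix
  if (new_size < kMaxPrefix) {
    out->push_back(static_cast<uint8_t>(0x20 | new_size));
    return;
  }
  out->push_back(static_cast<uint8_t>(0x20 | kMaxPrefix));
  uint32_t rest = new_size - kMaxPrefix;
  while (rest >= 128) {
    out->push_back(static_cast<uint8_t>((rest & 0x7f) | 0x80));
    rest >>= 7;
  }
  out->push_back(static_cast<uint8_t>(rest));
}

int main() {
  std::vector<uint8_t> out;
  EncodeTableSizeUpdate(4096, &out);  // 4096 is the HTTP/2 default table size
  for (uint8_t b : out) printf("%02x ", b);  // prints: 3f e1 1f
  printf("\n");
  return 0;
}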
