Merge pull request #16460 from hcaseyal/refactor_transport

Light refactoring of some transport code
pull/16553/head
hcaseyal authored 7 years ago, committed by GitHub
commit cd74b357e1
  337  src/core/ext/transport/chttp2/transport/chttp2_transport.cc
    2  src/core/ext/transport/chttp2/transport/internal.h
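At a glance, the refactor pulls the channel-args parsing and several setup steps out of init_transport() into named helpers: read_channel_args() (which now returns whether BDP probing stays enabled), init_transport_closures(), configure_transport_ping_policy(), init_transport_keepalive_settings(), and init_keepalive_pings_if_enabled(). The sketch below shows only that delegation shape; FakeTransport, FakeChannelArgs, and the simplified defaults are hypothetical stand-ins, not the real gRPC types.

```cpp
// Sketch only: stand-in types that mirror the new shape of init_transport()
// after this PR; none of these names are the real gRPC structures.
#include <climits>
#include <cstdio>

struct FakeChannelArgs {            // stand-in for grpc_channel_args
  int keepalive_time_ms = 30000;
  bool bdp_probe = true;
};

struct FakeTransport {              // stand-in for grpc_chttp2_transport
  bool is_client = false;
  int keepalive_time_ms = INT_MAX;  // INT_MAX plays the role of "infinite"
  bool keepalive_enabled = false;
};

// Mirrors read_channel_args(): parse the args, return whether BDP is enabled.
static bool ReadChannelArgs(FakeTransport* t, const FakeChannelArgs* args) {
  t->keepalive_time_ms = args->keepalive_time_ms;
  return args->bdp_probe;
}

// Mirrors init_keepalive_pings_if_enabled(): only arm keepalive when finite.
static void InitKeepalivePingsIfEnabled(FakeTransport* t) {
  t->keepalive_enabled = (t->keepalive_time_ms != INT_MAX);
}

// Mirrors the slimmed-down init_transport(): delegate to the helpers above.
static void InitTransport(FakeTransport* t, const FakeChannelArgs* args,
                          bool is_client) {
  t->is_client = is_client;
  bool enable_bdp = true;
  if (args != nullptr) enable_bdp = ReadChannelArgs(t, args);
  InitKeepalivePingsIfEnabled(t);
  std::printf("bdp=%d keepalive=%d\n", enable_bdp, t->keepalive_enabled);
}

int main() {
  FakeTransport t;
  FakeChannelArgs args;
  InitTransport(&t, &args, /*is_client=*/true);
}
```

The real read_channel_args() in the diff below additionally walks a settings_map table and the HTTP/2 ping and keepalive channel args; only the overall delegation structure carries over from this sketch.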

src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -230,137 +230,14 @@ void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
static const grpc_transport_vtable* get_vtable(void);
static void init_transport(grpc_chttp2_transport* t,
/* Returns whether bdp is enabled */
static bool read_channel_args(grpc_chttp2_transport* t,
const grpc_channel_args* channel_args,
grpc_endpoint* ep, bool is_client) {
bool is_client) {
bool enable_bdp = true;
size_t i;
int j;
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
grpc_slice_buffer_init(&t->qbuf);
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
destructive_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
t, grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
next_bdp_ping_timer_expired_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
t, grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
t->goaway_error = GRPC_ERROR_NONE;
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(&t->hpack_parser);
grpc_slice_buffer_init(&t->read_buffer);
/* 8 is a random stab in the dark as to a good initial size: it's small enough
that it shouldn't waste memory for infrequently used connections, yet
large enough that the exponential growth should happen nicely when it's
needed.
TODO(ctiller): tune this */
grpc_chttp2_stream_map_init(&t->stream_map, 8);
/* copy in initial settings to all setting sets */
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
t->dirtied_local_settings = 1;
/* Hack: it's common for implementations to assume 65536 bytes initial send
window -- this should by rights be 0 */
t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
t->sent_local_settings = 0;
t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
}
/* configure http2 the way we like it */
if (is_client) {
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
1);
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data =
g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data =
g_default_min_recv_ping_interval_without_data_ms;
/* Keepalive setting */
if (t->is_client) {
t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_time_ms;
t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_client_keepalive_permit_without_calls;
} else {
t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_time_ms;
t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_server_keepalive_permit_without_calls;
}
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
bool enable_bdp = true;
if (channel_args) {
for (i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER)) {
@@ -370,8 +247,8 @@ static void init_transport(grpc_chttp2_transport* t,
if (value >= 0) {
if ((t->next_stream_id & 1) != (value & 1)) {
gpr_log(GPR_ERROR, "%s: low bit must be %d on %s",
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER,
t->next_stream_id & 1, is_client ? "client" : "server");
GRPC_ARG_HTTP2_INITIAL_SEQUENCE_NUMBER, t->next_stream_id & 1,
is_client ? "client" : "server");
} else {
t->next_stream_id = static_cast<uint32_t>(value);
}
@@ -395,8 +272,7 @@ static void init_transport(grpc_chttp2_transport* t,
t->ping_policy.max_ping_strikes = grpc_channel_arg_get_integer(
&channel_args->args[i], {g_default_max_ping_strikes, 0, INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_sent_ping_interval_without_data =
grpc_channel_arg_get_integer(
@@ -405,8 +281,7 @@ static void init_transport(grpc_chttp2_transport* t,
g_default_min_sent_ping_interval_without_data_ms, 0,
INT_MAX});
} else if (0 ==
strcmp(
channel_args->args[i].key,
strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS)) {
t->ping_policy.min_recv_ping_interval_without_data =
grpc_channel_arg_get_integer(
@@ -416,14 +291,13 @@ static void init_transport(grpc_chttp2_transport* t,
INT_MAX});
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_HTTP2_WRITE_BUFFER_SIZE)) {
t->write_buffer_size =
static_cast<uint32_t>(grpc_channel_arg_get_integer(
t->write_buffer_size = static_cast<uint32_t>(grpc_channel_arg_get_integer(
&channel_args->args[i], {0, 0, MAX_WRITE_BUFFER_SIZE}));
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_HTTP2_BDP_PROBE)) {
enable_bdp = grpc_channel_arg_get_bool(&channel_args->args[i], true);
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_TIME_MS)) {
} else if (0 ==
strcmp(channel_args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) {
const int value = grpc_channel_arg_get_integer(
&channel_args->args[i],
grpc_integer_options{t->is_client
@@ -439,8 +313,7 @@ static void init_transport(grpc_chttp2_transport* t,
? g_default_client_keepalive_timeout_ms
: g_default_server_keepalive_timeout_ms,
0, INT_MAX});
t->keepalive_timeout =
value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
t->keepalive_timeout = value == INT_MAX ? GRPC_MILLIS_INF_FUTURE : value;
} else if (0 == strcmp(channel_args->args[i].key,
GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
t->keepalive_permit_without_calls = static_cast<uint32_t>(
@@ -468,8 +341,7 @@ static void init_transport(grpc_chttp2_transport* t,
grpc_chttp2_setting_id setting_id;
grpc_integer_options integer_options;
bool availability[2] /* server, client */;
} settings_map[] = {
{GRPC_ARG_MAX_CONCURRENT_STREAMS,
} settings_map[] = {{GRPC_ARG_MAX_CONCURRENT_STREAMS,
GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS,
{-1, 0, INT32_MAX},
{true, false}},
@@ -513,6 +385,162 @@ static void init_transport(grpc_chttp2_transport* t,
}
}
}
return enable_bdp;
}
static void init_transport_closures(grpc_chttp2_transport* t) {
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->destructive_reclaimer_locked,
destructive_reclaimer_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked, retry_initiate_ping_locked,
t, grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked, start_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked, finish_bdp_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
next_bdp_ping_timer_expired_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked, init_keepalive_ping_locked,
t, grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t,
grpc_combiner_scheduler(t->combiner));
GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t,
grpc_combiner_scheduler(t->combiner));
}
static void init_transport_keepalive_settings(grpc_chttp2_transport* t) {
if (t->is_client) {
t->keepalive_time = g_default_client_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_time_ms;
t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_client_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_client_keepalive_permit_without_calls;
} else {
t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_time_ms;
t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
? GRPC_MILLIS_INF_FUTURE
: g_default_server_keepalive_timeout_ms;
t->keepalive_permit_without_calls =
g_default_server_keepalive_permit_without_calls;
}
}
static void configure_transport_ping_policy(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
t->ping_policy.min_sent_ping_interval_without_data =
g_default_min_sent_ping_interval_without_data_ms;
t->ping_policy.max_ping_strikes = g_default_max_ping_strikes;
t->ping_policy.min_recv_ping_interval_without_data =
g_default_min_recv_ping_interval_without_data_ms;
}
static void init_keepalive_pings_if_enabled(grpc_chttp2_transport* t) {
if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(&t->keepalive_ping_timer,
grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
&t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
inflight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
}
static void init_transport(grpc_chttp2_transport* t,
const grpc_channel_args* channel_args,
grpc_endpoint* ep, bool is_client) {
GPR_ASSERT(strlen(GRPC_CHTTP2_CLIENT_CONNECT_STRING) ==
GRPC_CHTTP2_CLIENT_CONNECT_STRLEN);
t->base.vtable = get_vtable();
t->ep = ep;
/* one ref is for destroy */
gpr_ref_init(&t->refs, 1);
t->combiner = grpc_combiner_create();
t->peer_string = grpc_endpoint_get_peer(ep);
t->endpoint_reading = 1;
t->next_stream_id = is_client ? 1 : 2;
t->is_client = is_client;
t->deframe_state = is_client ? GRPC_DTS_FH_0 : GRPC_DTS_CLIENT_PREFIX_0;
t->is_first_frame = true;
grpc_connectivity_state_init(
&t->channel_callback.state_tracker, GRPC_CHANNEL_READY,
is_client ? "client_transport" : "server_transport");
grpc_slice_buffer_init(&t->qbuf);
grpc_slice_buffer_init(&t->outbuf);
grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
init_transport_closures(t);
t->goaway_error = GRPC_ERROR_NONE;
grpc_chttp2_goaway_parser_init(&t->goaway_parser);
grpc_chttp2_hpack_parser_init(&t->hpack_parser);
grpc_slice_buffer_init(&t->read_buffer);
/* 8 is a random stab in the dark as to a good initial size: it's small enough
that it shouldn't waste memory for infrequently used connections, yet
large enough that the exponential growth should happen nicely when it's
needed.
TODO(ctiller): tune this */
grpc_chttp2_stream_map_init(&t->stream_map, 8);
/* copy in initial settings to all setting sets */
size_t i;
int j;
for (i = 0; i < GRPC_CHTTP2_NUM_SETTINGS; i++) {
for (j = 0; j < GRPC_NUM_SETTING_SETS; j++) {
t->settings[j][i] = grpc_chttp2_settings_parameters[i].default_value;
}
}
t->dirtied_local_settings = 1;
/* Hack: it's common for implementations to assume 65536 bytes initial send
window -- this should by rights be 0 */
t->force_send_settings = 1 << GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
t->sent_local_settings = 0;
t->write_buffer_size = grpc_core::chttp2::kDefaultWindow;
if (is_client) {
grpc_slice_buffer_add(&t->outbuf, grpc_slice_from_copied_string(
GRPC_CHTTP2_CLIENT_CONNECT_STRING));
}
/* configure http2 the way we like it */
if (is_client) {
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
}
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
DEFAULT_MAX_HEADER_LIST_SIZE);
queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
1);
configure_transport_ping_policy(t);
init_transport_keepalive_settings(t);
t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
bool enable_bdp = true;
if (channel_args) {
enable_bdp = read_channel_args(t, channel_args, is_client);
}
if (g_flow_control_enabled) {
@@ -531,23 +559,11 @@ static void init_transport(grpc_chttp2_transport* t,
t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
t->ping_recv_state.ping_strikes = 0;
/* Start keepalive pings */
if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
grpc_timer_init(&t->keepalive_ping_timer,
grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
&t->init_keepalive_ping_locked);
} else {
/* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
inflight keepalive timers */
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED;
}
init_keepalive_pings_if_enabled(t);
if (enable_bdp) {
GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
schedule_bdp_ping_locked(t);
grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
nullptr);
}
@@ -2887,17 +2903,20 @@ bool Chttp2IncomingByteStream::Next(size_t max_size_hint,
}
}
void Chttp2IncomingByteStream::MaybeCreateStreamDecompressionCtx() {
if (!stream_->stream_decompression_ctx) {
stream_->stream_decompression_ctx = grpc_stream_compression_context_create(
stream_->stream_decompression_method);
}
}
grpc_error* Chttp2IncomingByteStream::Pull(grpc_slice* slice) {
GPR_TIMER_SCOPE("incoming_byte_stream_pull", 0);
grpc_error* error;
if (stream_->unprocessed_incoming_frames_buffer.length > 0) {
if (!stream_->unprocessed_incoming_frames_decompressed) {
bool end_of_context;
if (!stream_->stream_decompression_ctx) {
stream_->stream_decompression_ctx =
grpc_stream_compression_context_create(
stream_->stream_decompression_method);
}
MaybeCreateStreamDecompressionCtx();
if (!grpc_stream_decompress(stream_->stream_decompression_ctx,
&stream_->unprocessed_incoming_frames_buffer,
&stream_->decompressed_data_buffer, nullptr,

src/core/ext/transport/chttp2/transport/internal.h
@@ -246,6 +246,8 @@ class Chttp2IncomingByteStream : public ByteStream {
static void NextLocked(void* arg, grpc_error* error_ignored);
static void OrphanLocked(void* arg, grpc_error* error_ignored);
void MaybeCreateStreamDecompressionCtx();
grpc_chttp2_transport* transport_; // Immutable.
grpc_chttp2_stream* stream_; // Immutable.
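
The chttp2_transport.cc hunk above also lifts the create-on-first-use branch for the stream decompression context out of Pull() into MaybeCreateStreamDecompressionCtx(), and internal.h gains the matching declaration. Below is a minimal sketch of that lazy-init pattern; IncomingByteStream and DecompressionCtx are hypothetical stand-ins, not the real grpc_stream_compression_context API.

```cpp
// Sketch of the lazy-init helper pattern; every name here is a stand-in.
#include <memory>

struct DecompressionCtx {};  // placeholder for a real decompression context

class IncomingByteStream {
 public:
  void Pull() {
    MaybeCreateDecompressionCtx();  // created on the first pull only
    // ... decompress buffered frames using *decompression_ctx_ ...
  }

 private:
  void MaybeCreateDecompressionCtx() {
    if (!decompression_ctx_) {
      decompression_ctx_ = std::make_unique<DecompressionCtx>();
    }
  }
  std::unique_ptr<DecompressionCtx> decompression_ctx_;
};

int main() {
  IncomingByteStream s;
  s.Pull();  // allocates the context
  s.Pull();  // reuses it
}
```

Centralizing the null check in one helper keeps Pull() focused on decompression and gives any future caller a single place to obtain the context.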
