Trim deadline setting out of call creation path: reduces ClientChannelFilter init/destroy from 128ns to 46ns on my machine

pull/9906/head
Craig Tiller 8 years ago
parent cd232f64c6
commit 7acc37e502
1. src/core/ext/client_channel/client_channel.c (81 lines changed)
2. src/core/lib/channel/deadline_filter.c (21 lines changed)
3. src/core/lib/channel/deadline_filter.h (10 lines changed)
4. test/core/end2end/tests/negative_deadline.c (10 lines changed)
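
The 128ns -> 46ns figure in the title is a per-call cost: how long the client_channel filter's call element takes to initialize and destroy. As a rough illustration of how such a number can be obtained (this is not gRPC's actual benchmark; filter_init/filter_destroy below are hypothetical stand-ins for the element's init/destroy hooks):

/* Hypothetical timing loop; the real measurement comes from gRPC's own
 * call-creation microbenchmarks. */
#include <stdio.h>
#include <time.h>

typedef struct { int phase; } call_data;

static void filter_init(call_data *calld) { calld->phase = 0; }  /* stand-in */
static void filter_destroy(call_data *calld) { (void)calld; }    /* stand-in */

int main(void) {
  enum { kIterations = 10 * 1000 * 1000 };
  call_data calld;
  struct timespec start, end;
  clock_gettime(CLOCK_MONOTONIC, &start);
  for (int i = 0; i < kIterations; i++) {
    filter_init(&calld);
    filter_destroy(&calld);
  }
  clock_gettime(CLOCK_MONOTONIC, &end);
  double total_ns = (end.tv_sec - start.tv_sec) * 1e9 +
                    (end.tv_nsec - start.tv_nsec);
  printf("init+destroy: %.1f ns per call\n", total_ns / kIterations);
  return 0;
}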

src/core/ext/client_channel/client_channel.c
@@ -71,7 +71,8 @@
  */
 typedef enum {
-  WAIT_FOR_READY_UNSET,
+  /* zero so it can be default initialized */
+  WAIT_FOR_READY_UNSET = 0,
   WAIT_FOR_READY_FALSE,
   WAIT_FOR_READY_TRUE
 } wait_for_ready_value;
@@ -608,7 +609,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
 #define CANCELLED_CALL ((grpc_subchannel_call *)1)
 typedef enum {
-  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
+  /* zero so that it can be default-initialized */
+  GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING = 0,
   GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
 } subchannel_creation_phase;
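
Both enum changes above pin the initial enumerator to an explicit 0, as the new comments say, so that a zero-filled call_data already sits in the correct starting state. A minimal sketch of the pattern those comments refer to (names are illustrative, not the real gRPC types):

#include <assert.h>
#include <string.h>

typedef enum {
  PHASE_NOT_CREATING = 0, /* zero so it can be default-initialized */
  PHASE_PICKING_SUBCHANNEL
} creation_phase;

typedef struct {
  creation_phase phase;
  /* ...other per-call members... */
} call_data;

int main(void) {
  call_data calld;
  memset(&calld, 0, sizeof(calld));          /* bulk zero-initialization... */
  assert(calld.phase == PHASE_NOT_CREATING); /* ...lands in the intended state */
  return 0;
}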
@@ -1066,8 +1068,8 @@ static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
            gpr_time_add(calld->call_start_time, method_params->timeout);
       if (gpr_time_cmp(per_method_deadline, calld->deadline) < 0) {
         calld->deadline = per_method_deadline;
-        // Reset deadline timer.
-        grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+        // Start deadline timer.
+        grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
       }
     }
     if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
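
With nothing arming the deadline timer at call creation any more, the service-config path calls grpc_deadline_state_start rather than grpc_deadline_state_reset, and it still only applies a method-level timeout when it yields an earlier deadline than the one the application supplied. A standalone sketch of that tightening rule, using plain int64 nanosecond timestamps instead of gpr_timespec (illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Returns the effective deadline: the method-level timeout only wins if it
 * is tighter than the application-supplied deadline.  This is also the
 * point at which the deadline timer would be started. */
static int64_t apply_method_timeout(int64_t call_start_ns, int64_t deadline_ns,
                                    int64_t method_timeout_ns) {
  if (method_timeout_ns != 0) {
    int64_t per_method_deadline = call_start_ns + method_timeout_ns;
    if (per_method_deadline < deadline_ns) return per_method_deadline;
  }
  return deadline_ns;
}

int main(void) {
  printf("%lld\n", (long long)apply_method_timeout(1000, 1500, 200)); /* 1200: method timeout is tighter */
  printf("%lld\n", (long long)apply_method_timeout(1000, 1100, 200)); /* 1100: application deadline kept */
  return 0;
}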
@@ -1082,85 +1084,20 @@ static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
   GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
 }
-static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
-                                               void *arg,
-                                               grpc_error *error_ignored) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  // If the resolver has already returned results, then we can access
-  // the service config parameters immediately. Otherwise, we need to
-  // defer that work until the resolver returns an initial result.
-  // TODO(roth): This code is almost but not quite identical to the code
-  // in read_service_config() above. It would be nice to find a way to
-  // combine them, to avoid having to maintain it twice.
-  if (chand->lb_policy != NULL) {
-    // We already have a resolver result, so check for service config.
-    if (chand->method_params_table != NULL) {
-      grpc_slice_hash_table *method_params_table =
-          grpc_slice_hash_table_ref(chand->method_params_table);
-      method_parameters *method_params = grpc_method_config_table_get(
-          exec_ctx, method_params_table, calld->path);
-      if (method_params != NULL) {
-        if (gpr_time_cmp(method_params->timeout,
-                         gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
-          gpr_timespec per_method_deadline =
-              gpr_time_add(calld->call_start_time, method_params->timeout);
-          calld->deadline = gpr_time_min(calld->deadline, per_method_deadline);
-        }
-        if (method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
-          calld->wait_for_ready_from_service_config =
-              method_params->wait_for_ready;
-        }
-      }
-      grpc_slice_hash_table_unref(exec_ctx, method_params_table);
-    }
-  } else {
-    // We don't yet have a resolver result, so register a callback to
-    // get the service config data once the resolver returns.
-    // Take a reference to the call stack to be owned by the callback.
-    GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
-    grpc_closure_init(&calld->read_service_config, read_service_config_locked,
-                      elem, grpc_combiner_scheduler(chand->combiner, false));
-    grpc_closure_list_append(&chand->waiting_for_config_closures,
-                             &calld->read_service_config, GRPC_ERROR_NONE);
-  }
-  // Start the deadline timer with the current deadline value. If we
-  // do not yet have service config data, then the timer may be reset
-  // later.
-  grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
-                        "initial_read_service_config");
-}
 /* Constructor for call_data */
 static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                      grpc_call_element *elem,
                                      const grpc_call_element_args *args) {
-  channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
   // Initialize data members.
   grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
   calld->path = grpc_slice_ref_internal(args->path);
   calld->call_start_time = args->start_time;
   calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
   calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
   calld->cancel_error = GRPC_ERROR_NONE;
   gpr_atm_rel_store(&calld->subchannel_call, 0);
   calld->connected_subchannel = NULL;
   calld->waiting_ops = NULL;
   calld->waiting_ops_count = 0;
   calld->waiting_ops_capacity = 0;
   calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
   calld->owning_call = args->call_stack;
   calld->pollent = NULL;
-  GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
-  grpc_closure_sched(
-      exec_ctx,
-      grpc_closure_init(&calld->read_service_config,
-                        initial_read_service_config_locked, elem,
-                        grpc_combiner_scheduler(chand->combiner, false)),
-      GRPC_ERROR_NONE);
+  grpc_closure_init(&calld->read_service_config, read_service_config_locked,
+                    elem, grpc_combiner_scheduler(chand->combiner, false));
   return GRPC_ERROR_NONE;
 }
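
The constructor change is the core of the optimization: cc_init_call_elem previously took a call-stack ref and scheduled initial_read_service_config_locked onto the channel combiner, and that closure also started the deadline timer. Now the constructor merely initializes the closure; both the service-config lookup and the timer start are deferred until the call actually does work (the new scheduling point is not part of this excerpt). A minimal sketch of that "record now, run on first op" shape, with hypothetical names standing in for the gRPC closure machinery:

#include <stdbool.h>
#include <stdio.h>

typedef void (*work_fn)(void *arg);

typedef struct {
  work_fn deferred_work; /* set up in the constructor, not run there */
  void *arg;
  bool work_done;
} fake_call;

static void read_service_config(void *arg) {
  (void)arg;
  printf("service config consulted on first op, not at call creation\n");
}

/* Cheap constructor: only records what to do later. */
static void call_init(fake_call *call) {
  call->deferred_work = read_service_config;
  call->arg = call;
  call->work_done = false;
}

/* The first batch of ops triggers the deferred work, off the creation path. */
static void call_start_first_op(fake_call *call) {
  if (!call->work_done) {
    call->deferred_work(call->arg);
    call->work_done = true;
  }
}

int main(void) {
  fake_call call;
  call_init(&call);
  call_start_first_op(&call);
  return 0;
}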

src/core/lib/channel/deadline_filter.c
@@ -78,21 +78,9 @@ retry:
       (grpc_deadline_timer_state)gpr_atm_acq_load(&deadline_state->timer_state);
   switch (cur_state) {
     case GRPC_DEADLINE_STATE_PENDING:
+    case GRPC_DEADLINE_STATE_FINISHED:
       // Note: We do not start the timer if there is already a timer
       return;
-    case GRPC_DEADLINE_STATE_FINISHED:
-      if (gpr_atm_rel_cas(&deadline_state->timer_state,
-                          GRPC_DEADLINE_STATE_FINISHED,
-                          GRPC_DEADLINE_STATE_PENDING)) {
-        // If we've already created and destroyed a timer, we always create a
-        // new closure: we have no other guarantee that the inlined closure is
-        // not in use (it may hold a pending call to timer_callback)
-        closure = grpc_closure_create(timer_callback, elem,
-                                      grpc_schedule_on_exec_ctx);
-      } else {
-        goto retry;
-      }
-      break;
     case GRPC_DEADLINE_STATE_INITIAL:
       if (gpr_atm_rel_cas(&deadline_state->timer_state,
                           GRPC_DEADLINE_STATE_INITIAL,
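
With grpc_deadline_state_reset gone (next two hunks), start_timer_if_needed no longer has to re-arm a timer that has already fired: GRPC_DEADLINE_STATE_FINISHED now falls through to the same early return as PENDING, and the only arming transition left is INITIAL -> PENDING via compare-and-swap. A simplified sketch of that arm-once guard, using C11 atomics in place of gpr_atm and omitting the retry loop the real code keeps:

#include <stdatomic.h>
#include <stdio.h>

typedef enum { TIMER_INITIAL = 0, TIMER_PENDING, TIMER_FINISHED } timer_state;

static _Atomic timer_state g_timer_state = TIMER_INITIAL;

static void start_timer_if_needed(void) {
  timer_state expected = TIMER_INITIAL;
  /* Only the caller that wins the INITIAL -> PENDING CAS arms the timer;
   * every later call (PENDING or FINISHED) is a no-op. */
  if (atomic_compare_exchange_strong(&g_timer_state, &expected, TIMER_PENDING)) {
    printf("timer armed\n");
  } else {
    printf("timer already pending or finished; nothing to do\n");
  }
}

int main(void) {
  start_timer_if_needed(); /* arms the timer */
  start_timer_if_needed(); /* no-op */
  return 0;
}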
@@ -189,13 +177,6 @@ void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
   }
 }
-void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                               gpr_timespec new_deadline) {
-  grpc_deadline_state* deadline_state = elem->call_data;
-  cancel_timer_if_needed(exec_ctx, deadline_state);
-  start_timer_if_needed(exec_ctx, elem, new_deadline);
-}
 void grpc_deadline_state_client_start_transport_stream_op(
     grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
     grpc_transport_stream_op* op) {

src/core/lib/channel/deadline_filter.h
@@ -73,16 +73,6 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
 void grpc_deadline_state_start(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                                gpr_timespec deadline);
-// Cancels the existing timer and starts a new one with new_deadline.
-//
-// Note: It is generally safe to call this with an earlier deadline
-// value than the current one, but not the reverse. No checks are done
-// to ensure that the timer callback is not invoked while it is in the
-// process of being reset, which means that attempting to increase the
-// deadline may result in the timer being called twice.
-void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                               gpr_timespec new_deadline);
 // To be called from the client-side filter's start_transport_stream_op()
 // method. Ensures that the deadline timer is cancelled when the call
 // is completed.

test/core/end2end/tests/negative_deadline.c
@@ -124,6 +124,11 @@ static void simple_request_body(grpc_end2end_test_config config,
   memset(ops, 0, sizeof(ops));
   op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = NULL;
+  op++;
   op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
   op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
   op->data.recv_status_on_client.status = &status;
@@ -136,11 +141,6 @@ static void simple_request_body(grpc_end2end_test_config config,
   op->flags = 0;
   op->reserved = NULL;
   op++;
-  op->op = GRPC_OP_SEND_INITIAL_METADATA;
-  op->data.send_initial_metadata.count = 0;
-  op->flags = 0;
-  op->reserved = NULL;
-  op++;
   op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
   op->flags = 0;
   op->reserved = NULL;
