Remove debugging code.

pull/7970/head
Mark D. Roth 9 years ago
parent 75d7478d12
commit 6ad991783f
  1. src/core/ext/client_config/client_channel.c (19 changed lines)
  2. src/core/lib/channel/deadline_filter.c (17 changed lines)
  3. src/core/lib/channel/deadline_filter.h (6 changed lines)
  4. src/core/lib/surface/call.c (4 changed lines)

src/core/ext/client_config/client_channel.c
@@ -380,11 +380,10 @@ typedef enum {
typedef struct client_channel_call_data {
// State for handling deadlines.
// The code in deadline_filter.c requires this to be the first field.
// FIXME
// TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
// and this struct both independently store a pointer to the call
// stack and each has its own mutex. If/when we have time, find a way
// to avoid this without breaking either abstraction.
// to avoid this without breaking the grpc_deadline_state abstraction.
grpc_deadline_state deadline_state;
/** either 0 for no call, 1 for cancelled, or a pointer to a
@@ -466,9 +465,6 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
const char* msg = grpc_error_string(error);
gpr_log(GPR_INFO, "==> %s(): error=%s", __func__, msg);
grpc_error_free_string(msg);
call_data *calld = arg;
gpr_mu_lock(&calld->mu);
GPR_ASSERT(calld->creation_phase ==
@@ -547,8 +543,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
uint32_t initial_metadata_flags,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready, grpc_error *error) {
gpr_log(GPR_INFO, "==> %s()", __func__);
GPR_TIMER_BEGIN("pick_subchannel", 0);
channel_data *chand = elem->channel_data;
@@ -561,13 +555,11 @@ gpr_log(GPR_INFO, "==> %s()", __func__);
gpr_mu_lock(&chand->mu);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
gpr_log(GPR_INFO, "asking LB policy to cancel pick");
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
connected_subchannel, GRPC_ERROR_REF(error));
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = closure->next_data.next) {
gpr_log(GPR_INFO, "top of closure loop");
cpa = closure->cb_arg;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
@@ -579,7 +571,6 @@ gpr_log(GPR_INFO, "top of closure loop");
gpr_mu_unlock(&chand->mu);
GPR_TIMER_END("pick_subchannel", 0);
GRPC_ERROR_UNREF(error);
gpr_log(GPR_INFO, "returning from pick_subchannel()");
return true;
}
GPR_ASSERT(error == GRPC_ERROR_NONE);
@@ -629,7 +620,6 @@ gpr_log(GPR_INFO, "returning from pick_subchannel()");
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
gpr_log(GPR_INFO, "==> %s()", __func__);
call_data *calld = elem->call_data;
GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
@@ -669,16 +659,13 @@ retry:
if (op->cancel_error != GRPC_ERROR_NONE) {
if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
(gpr_atm)(uintptr_t)CANCELLED_CALL)) {
gpr_log(GPR_INFO, "CANCELLED_CALL");
goto retry;
} else {
switch (calld->creation_phase) {
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
gpr_log(GPR_INFO, "GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING");
fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
gpr_log(GPR_INFO, "GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL");
pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
NULL, GRPC_ERROR_REF(op->cancel_error));
break;
@@ -734,10 +721,6 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element_args *args) {
call_data *calld = elem->call_data;
grpc_deadline_state_init(&calld->deadline_state, args->call_stack);
// FIXME: remove
calld->deadline_state.is_client = true;
gpr_atm_rel_store(&calld->subchannel_call, 0);
gpr_mu_init(&calld->mu);
calld->connected_subchannel = NULL;
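
The "requires this to be the first field" comment above exists because deadline_filter.c reads the shared call_data directly as a grpc_deadline_state*: C guarantees no padding before a struct's first member, so a pointer to client_channel_call_data and a pointer to its leading deadline_state field have the same address. A minimal sketch of that contract (the struct and function names here are illustrative, not code from this commit):

// Illustrative sketch only, not part of this commit.
typedef struct call_data_sketch {
  grpc_deadline_state deadline_state;  // must remain the first field
  // ... filter-specific fields follow ...
} call_data_sketch;

static void sketch_access(grpc_call_element* elem) {
  // The same assignment deadline_filter.c performs on call_data it did not
  // define; it is only valid because deadline_state sits at offset 0.
  grpc_deadline_state* deadline_state = elem->call_data;
  (void)deadline_state;  // then handed to the deadline helpers
}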

src/core/lib/channel/deadline_filter.c
@@ -49,12 +49,10 @@ static void timer_callback(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
grpc_call_element* elem = arg;
grpc_deadline_state* deadline_state = elem->call_data;
gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client);
gpr_mu_lock(&deadline_state->timer_mu);
deadline_state->timer_pending = false;
gpr_mu_unlock(&deadline_state->timer_mu);
if (error != GRPC_ERROR_CANCELLED) {
gpr_log(GPR_INFO, "DEADLINE_EXCEEDED");
gpr_slice msg = gpr_slice_from_static_string("Deadline Exceeded");
grpc_call_element_send_cancel_with_message(exec_ctx, elem,
GRPC_STATUS_DEADLINE_EXCEEDED,
@@ -69,13 +67,11 @@ static void start_timer_if_needed(grpc_exec_ctx *exec_ctx,
grpc_call_element* elem,
gpr_timespec deadline) {
grpc_deadline_state* deadline_state = elem->call_data;
gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client);
deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC);
if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
// Take a reference to the call stack, to be owned by the timer.
GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
gpr_mu_lock(&deadline_state->timer_mu);
gpr_log(GPR_INFO, "STARTING TIMER -- is_client=%d", deadline_state->is_client);
deadline_state->timer_pending = true;
grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, timer_callback,
elem, gpr_now(GPR_CLOCK_MONOTONIC));
@@ -86,10 +82,8 @@ gpr_log(GPR_INFO, "STARTING TIMER -- is_client=%d", deadline_state->is_client);
// Cancels the deadline timer.
static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client);
gpr_mu_lock(&deadline_state->timer_mu);
if (deadline_state->timer_pending) {
gpr_log(GPR_INFO, "CANCELLING TIMER -- is_client=%d", deadline_state->is_client);
grpc_timer_cancel(exec_ctx, &deadline_state->timer);
deadline_state->timer_pending = false;
}
@@ -99,7 +93,6 @@ gpr_log(GPR_INFO, "CANCELLING TIMER -- is_client=%d", deadline_state->is_client)
// Callback run when the call is complete.
static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
grpc_deadline_state* deadline_state = arg;
gpr_log(GPR_INFO, "==> %s(), is_client=%d, next_on_complete->cb=%p", __func__, deadline_state->is_client, deadline_state->next_on_complete->cb);
cancel_timer_if_needed(exec_ctx, deadline_state);
// Invoke the next callback.
deadline_state->next_on_complete->cb(
@@ -109,7 +102,6 @@ gpr_log(GPR_INFO, "==> %s(), is_client=%d, next_on_complete->cb=%p", __func__, d
// Inject our own on_complete callback into op.
static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
grpc_transport_stream_op* op) {
gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client);
deadline_state->next_on_complete = op->on_complete;
grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state);
op->on_complete = &deadline_state->on_complete;
@@ -117,7 +109,6 @@ gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client)
void grpc_deadline_state_init(grpc_deadline_state* deadline_state,
grpc_call_stack* call_stack) {
gpr_log(GPR_INFO, "==> %s()", __func__);
memset(deadline_state, 0, sizeof(*deadline_state));
deadline_state->call_stack = call_stack;
gpr_mu_init(&deadline_state->timer_mu);
@@ -125,7 +116,6 @@ gpr_log(GPR_INFO, "==> %s()", __func__);
void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
grpc_deadline_state* deadline_state) {
gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client);
cancel_timer_if_needed(exec_ctx, deadline_state);
gpr_mu_destroy(&deadline_state->timer_mu);
}
@@ -133,7 +123,6 @@ gpr_log(GPR_INFO, "==> %s(), is_client=%d", __func__, deadline_state->is_client)
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op) {
gpr_log(GPR_INFO, "==> %s(): op=%p {on_complete=%p, cancel_error=%p, close_error=%p, send_initial_metadata=%p, send_trailing_metadata=%p, send_message=%p, recv_initial_metadata_ready=%p, recv_trailing_metadata=%p}", __func__, op, op->on_complete, op->cancel_error, op->close_error, op->send_initial_metadata, op->send_trailing_metadata, op->send_message, op->recv_initial_metadata_ready, op->recv_trailing_metadata);
grpc_deadline_state* deadline_state = elem->call_data;
if (op->cancel_error != GRPC_ERROR_NONE ||
op->close_error != GRPC_ERROR_NONE) {
@@ -194,14 +183,10 @@ typedef struct server_call_data {
static grpc_error *init_call_elem(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_call_element_args* args) {
gpr_log(GPR_INFO, "==> %s() -- call_data_size=%lu", __func__, (unsigned long)elem->filter->sizeof_call_data);
base_call_data* calld = elem->call_data;
// Note: size of call data is different between client and server.
memset(calld, 0, elem->filter->sizeof_call_data);
grpc_deadline_state_init(&calld->deadline_state, args->call_stack);
calld->deadline_state.is_client = elem->filter->sizeof_call_data == sizeof(base_call_data);
return GRPC_ERROR_NONE;
}
@@ -217,7 +202,6 @@ static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
static void client_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
gpr_log(GPR_INFO, "==> %s()", __func__);
grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
// Chain to next filter.
grpc_call_next_op(exec_ctx, elem, op);
@@ -240,7 +224,6 @@ static void recv_initial_metadata_ready(grpc_exec_ctx *exec_ctx, void *arg,
static void server_start_transport_stream_op(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
grpc_transport_stream_op* op) {
gpr_log(GPR_INFO, "==> %s(): op=%p {on_complete=%p, cancel_error=%p, close_error=%p, send_initial_metadata=%p, send_trailing_metadata=%p, send_message=%p, recv_initial_metadata_ready=%p, recv_trailing_metadata=%p}", __func__, op, op->on_complete, op->cancel_error, op->close_error, op->send_initial_metadata, op->send_trailing_metadata, op->send_message, op->recv_initial_metadata_ready, op->recv_trailing_metadata);
server_call_data* calld = elem->call_data;
if (op->cancel_error != GRPC_ERROR_NONE ||
op->close_error != GRPC_ERROR_NONE) {
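
The "size of call data is different between client and server" note is why init_call_elem zeroes elem->filter->sizeof_call_data rather than sizeof(base_call_data): the client and server deadline filters share this init routine but register differently sized call_data, and both keep the shared deadline state at offset 0. A hedged sketch of that shape (the *_sketch names and the extra server field are illustrative assumptions, not taken from this commit):

// Illustrative sketch only, not part of this commit.
typedef struct base_call_data_sketch {
  grpc_deadline_state deadline_state;  // shared state, kept first
} base_call_data_sketch;

typedef struct server_call_data_sketch {
  base_call_data_sketch base;  // keeps deadline_state at offset 0
  grpc_closure recv_initial_metadata_ready_sketch;  // assumed server-only state
} server_call_data_sketch;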

src/core/lib/channel/deadline_filter.h
@@ -52,10 +52,6 @@ typedef struct grpc_deadline_state {
// The original on_complete closure, which we chain to after our own
// closure is invoked.
grpc_closure* next_on_complete;
// FIXME: remove
bool is_client;
} grpc_deadline_state;
void grpc_deadline_state_init(grpc_deadline_state* call_data,
@@ -67,7 +63,7 @@ void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
// enforce call deadlines.
// It is the caller's responsibility to chain to the next filter if
// necessary after this function returns.
// REQUIRES: The first field in elem is a grpc_deadline_state struct.
// REQUIRES: The first field in elem->call_data is a grpc_deadline_state.
void grpc_deadline_state_client_start_transport_stream_op(
grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
grpc_transport_stream_op* op);
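
The updated REQUIRES line pairs with the "caller's responsibility to chain to the next filter" note above it: a filter that reuses this helper keeps a grpc_deadline_state at the start of its call_data, delegates deadline handling, and then forwards the op itself. A sketch modeled on client_start_transport_stream_op in deadline_filter.c (the wrapper name is hypothetical):

// Illustrative sketch only, not part of this commit.
static void sketch_client_start_transport_stream_op(
    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
    grpc_transport_stream_op* op) {
  // elem->call_data must begin with a grpc_deadline_state (see REQUIRES above).
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  // Chaining to the next filter stays with the caller.
  grpc_call_next_op(exec_ctx, elem, op);
}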

src/core/lib/surface/call.c
@@ -1241,10 +1241,6 @@ static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp,
grpc_call *child_call;
grpc_call *next_child_call;
const char* msg = grpc_error_string(error);
gpr_log(GPR_INFO, "==> finish_batch(): is_client=%d, error=%s", call->is_client, msg);
grpc_error_free_string(msg);
GRPC_ERROR_REF(error);
gpr_mu_lock(&call->mu);
