@@ -796,8 +796,7 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
 //   send_message
 //   recv_trailing_metadata
 //   send_trailing_metadata
-// We also add room for a single cancel_stream batch.
-#define MAX_WAITING_BATCHES 7
+#define MAX_WAITING_BATCHES 6
 
 /** Call data.  Holds a pointer to grpc_subchannel_call and the
     associated machinery to create such a pointer.
@@ -809,25 +808,23 @@ typedef struct client_channel_call_data {
   // The code in deadline_filter.c requires this to be the first field.
   // TODO(roth): This is slightly sub-optimal in that grpc_deadline_state
   // and this struct both independently store a pointer to the call
-  // combiner.  If/when we have time, find a way to avoid this without
-  // breaking the grpc_deadline_state abstraction.
+  // stack and each has its own mutex.  If/when we have time, find a way
+  // to avoid this without breaking the grpc_deadline_state abstraction.
   grpc_deadline_state deadline_state;
 
   grpc_slice path;  // Request path.
   gpr_timespec call_start_time;
   gpr_timespec deadline;
-  gpr_arena *arena;
-  grpc_call_combiner *call_combiner;
-
   grpc_server_retry_throttle_data *retry_throttle_data;
   method_parameters *method_params;
 
-  grpc_subchannel_call *subchannel_call;
-  grpc_error *error;
+  /** either 0 for no call, a pointer to a grpc_subchannel_call (if the lowest
+      bit is 0), or a pointer to an error (if the lowest bit is 1) */
+  gpr_atm subchannel_call_or_error;
+  gpr_arena *arena;
 
   grpc_lb_policy *lb_policy;  // Holds ref while LB pick is pending.
   grpc_closure lb_pick_closure;
-  grpc_closure cancel_closure;
 
   grpc_connected_subchannel *connected_subchannel;
   grpc_call_context_element subchannel_call_context[GRPC_CONTEXT_COUNT];
@@ -835,9 +832,10 @@ typedef struct client_channel_call_data {
   grpc_transport_stream_op_batch *waiting_for_pick_batches[MAX_WAITING_BATCHES];
   size_t waiting_for_pick_batches_count;
-  grpc_closure handle_pending_batch_in_call_combiner[MAX_WAITING_BATCHES];
 
-  grpc_transport_stream_op_batch *initial_metadata_batch;
+  grpc_transport_stream_op_batch_payload *initial_metadata_payload;
+
+  grpc_call_stack *owning_call;
 
   grpc_linked_mdelem lb_token_mdelem;
@@ -845,40 +843,53 @@ typedef struct client_channel_call_data {
   grpc_closure *original_on_complete;
 } call_data;
 
+typedef struct {
+  grpc_subchannel_call *subchannel_call;
+  grpc_error *error;
+} call_or_error;
+
+static call_or_error get_call_or_error(call_data *p) {
+  gpr_atm c = gpr_atm_acq_load(&p->subchannel_call_or_error);
+  if (c == 0)
+    return (call_or_error){NULL, NULL};
+  else if (c & 1)
+    return (call_or_error){NULL, (grpc_error *)((c) & ~(gpr_atm)1)};
+  else
+    return (call_or_error){(grpc_subchannel_call *)c, NULL};
+}
+
+static bool set_call_or_error(call_data *p, call_or_error coe) {
+  // this should always be under a lock
+  call_or_error existing = get_call_or_error(p);
+  if (existing.error != GRPC_ERROR_NONE) {
+    GRPC_ERROR_UNREF(coe.error);
+    return false;
+  }
+  GPR_ASSERT(existing.subchannel_call == NULL);
+  if (coe.error != GRPC_ERROR_NONE) {
+    GPR_ASSERT(coe.subchannel_call == NULL);
+    gpr_atm_rel_store(&p->subchannel_call_or_error, 1 | (gpr_atm)coe.error);
+  } else {
+    GPR_ASSERT(coe.subchannel_call != NULL);
+    gpr_atm_rel_store(&p->subchannel_call_or_error,
+                      (gpr_atm)coe.subchannel_call);
+  }
+  return true;
+}
+
 grpc_subchannel_call *grpc_client_channel_get_subchannel_call(
-    grpc_call_element *elem) {
-  call_data *calld = elem->call_data;
-  return calld->subchannel_call;
+    grpc_call_element *call_elem) {
+  return get_call_or_error(call_elem->call_data).subchannel_call;
 }
 
-// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_add(
+static void waiting_for_pick_batches_add_locked(
     call_data *calld, grpc_transport_stream_op_batch *batch) {
-  if (batch->send_initial_metadata) {
-    GPR_ASSERT(calld->initial_metadata_batch == NULL);
-    calld->initial_metadata_batch = batch;
-  } else {
-    GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
-    calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
-        batch;
-  }
+  GPR_ASSERT(calld->waiting_for_pick_batches_count < MAX_WAITING_BATCHES);
+  calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count++] =
+      batch;
 }
 
-// This is called via the call combiner, so access to calld is synchronized.
-static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
-                                                void *arg, grpc_error *error) {
-  call_data *calld = arg;
-  if (calld->waiting_for_pick_batches_count > 0) {
-    --calld->waiting_for_pick_batches_count;
-    grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx,
-        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
-        GRPC_ERROR_REF(error), calld->call_combiner);
-  }
-}
-
-// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
-                                          grpc_call_element *elem,
-                                          grpc_error *error) {
+static void waiting_for_pick_batches_fail_locked(grpc_exec_ctx *exec_ctx,
+                                                 grpc_call_element *elem,
+                                                 grpc_error *error) {
   call_data *calld = elem->call_data;
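
Aside (not part of the diff): the call_or_error accessors restored above rely on a low-bit tagging trick — because both pointee types are at least 2-byte aligned, a single atomic word can hold either a subchannel-call pointer or an error pointer, with the low bit telling them apart. A minimal standalone sketch of the same idea, using C11 atomics and made-up stand-in types instead of gpr_atm, grpc_subchannel_call, and grpc_error:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the real types; only their alignment matters here. */
    typedef struct { int id; } fake_call;
    typedef struct { const char *msg; } fake_error;

    typedef struct {
      fake_call *call;
      fake_error *error;
    } call_or_error;

    /* Decode with an acquire load, mirroring get_call_or_error() above. */
    static call_or_error decode(_Atomic uintptr_t *word) {
      uintptr_t c = atomic_load_explicit(word, memory_order_acquire);
      if (c == 0) return (call_or_error){NULL, NULL};
      if (c & 1) return (call_or_error){NULL, (fake_error *)(c & ~(uintptr_t)1)};
      return (call_or_error){(fake_call *)c, NULL};
    }

    int main(void) {
      _Atomic uintptr_t word = 0;
      static fake_error err = {"cancelled"};
      /* Objects with alignment >= 2 never have the low address bit set,
         so that bit is free to act as the "this is an error" tag. */
      assert(((uintptr_t)&err & 1) == 0);
      atomic_store_explicit(&word, (uintptr_t)&err | 1, memory_order_release);
      printf("error=%s\n", decode(&word).error->msg);
      return 0;
    }

The release store paired with the acquire load is what lets the fast path below read this word without holding the combiner.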
@@ -889,60 +900,34 @@ static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
             grpc_error_string(error));
   }
   for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
-    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
-                      fail_pending_batch_in_call_combiner, calld,
-                      grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_REF(error),
-                             "waiting_for_pick_batches_fail");
-  }
-  if (calld->initial_metadata_batch != NULL) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
-        calld->call_combiner);
-  } else {
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
-                            "waiting_for_pick_batches_fail");
+        exec_ctx, calld->waiting_for_pick_batches[i], GRPC_ERROR_REF(error));
   }
+  calld->waiting_for_pick_batches_count = 0;
   GRPC_ERROR_UNREF(error);
 }
 
-// This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
-                                               void *arg, grpc_error *ignored) {
-  call_data *calld = arg;
-  if (calld->waiting_for_pick_batches_count > 0) {
-    --calld->waiting_for_pick_batches_count;
-    grpc_subchannel_call_process_op(
-        exec_ctx, calld->subchannel_call,
-        calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
-  }
-}
-
-// This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
-                                            grpc_call_element *elem) {
-  channel_data *chand = elem->channel_data;
+static void waiting_for_pick_batches_resume_locked(grpc_exec_ctx *exec_ctx,
+                                                   grpc_call_element *elem) {
   call_data *calld = elem->call_data;
+  if (calld->waiting_for_pick_batches_count == 0) return;
+  call_or_error coe = get_call_or_error(calld);
+  if (coe.error != GRPC_ERROR_NONE) {
+    waiting_for_pick_batches_fail_locked(exec_ctx, elem,
+                                         GRPC_ERROR_REF(coe.error));
+    return;
+  }
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIdPTR
                        " pending batches to subchannel_call=%p",
-            chand, calld, calld->waiting_for_pick_batches_count,
-            calld->subchannel_call);
+            elem->channel_data, calld, calld->waiting_for_pick_batches_count,
+            coe.subchannel_call);
   }
   for (size_t i = 0; i < calld->waiting_for_pick_batches_count; ++i) {
-    GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
-                      run_pending_batch_in_call_combiner, calld,
-                      grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_NONE,
-                             "waiting_for_pick_batches_resume");
+    grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call,
+                                    calld->waiting_for_pick_batches[i]);
   }
-  GPR_ASSERT(calld->initial_metadata_batch != NULL);
-  grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
-                                  calld->initial_metadata_batch);
+  calld->waiting_for_pick_batches_count = 0;
 }
 
 // Applies service config to the call.  Must be invoked once we know
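
Aside (not part of the diff): both versions of the fail/resume pair implement the same queue-then-drain pattern — batches that arrive before a subchannel call exists are parked in waiting_for_pick_batches, then either replayed into the subchannel call or completed with the pick error. A toy illustration of that pattern; every name here is invented for the sketch, not gRPC API:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_WAITING 6 /* mirrors MAX_WAITING_BATCHES */

    typedef struct { const char *name; } batch; /* hypothetical stand-in */

    typedef struct {
      batch *waiting[MAX_WAITING];
      size_t count;
    } pick_state;

    /* Park a batch until the pick completes. */
    static void add_waiting(pick_state *s, batch *b) {
      if (s->count < MAX_WAITING) s->waiting[s->count++] = b;
    }

    /* On success, replay every parked batch in arrival order; the failure
       path is the same loop, completing each batch with the pick error. */
    static void resume_waiting(pick_state *s) {
      for (size_t i = 0; i < s->count; ++i)
        printf("sending %s down to the subchannel call\n", s->waiting[i]->name);
      s->count = 0;
    }

    int main(void) {
      pick_state s = {{0}, 0};
      batch b1 = {"send_initial_metadata"}, b2 = {"recv_initial_metadata"};
      add_waiting(&s, &b1);
      add_waiting(&s, &b2);
      resume_waiting(&s); /* as if the LB pick just completed */
      return 0;
    }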
@@ -983,28 +968,29 @@ static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
 static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
                                           grpc_call_element *elem,
                                           grpc_error *error) {
-  channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
+  grpc_subchannel_call *subchannel_call = NULL;
   const grpc_connected_subchannel_call_args call_args = {
       .pollent = calld->pollent,
       .path = calld->path,
       .start_time = calld->call_start_time,
       .deadline = calld->deadline,
       .arena = calld->arena,
-      .context = calld->subchannel_call_context,
-      .call_combiner = calld->call_combiner};
+      .context = calld->subchannel_call_context};
   grpc_error *new_error = grpc_connected_subchannel_create_call(
-      exec_ctx, calld->connected_subchannel, &call_args,
-      &calld->subchannel_call);
+      exec_ctx, calld->connected_subchannel, &call_args, &subchannel_call);
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
-            chand, calld, calld->subchannel_call, grpc_error_string(new_error));
+            elem->channel_data, calld, subchannel_call,
+            grpc_error_string(new_error));
   }
+  GPR_ASSERT(set_call_or_error(
+      calld, (call_or_error){.subchannel_call = subchannel_call}));
   if (new_error != GRPC_ERROR_NONE) {
     new_error = grpc_error_add_child(new_error, error);
-    waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
+    waiting_for_pick_batches_fail_locked(exec_ctx, elem, new_error);
   } else {
-    waiting_for_pick_batches_resume(exec_ctx, elem);
+    waiting_for_pick_batches_resume_locked(exec_ctx, elem);
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1016,10 +1002,11 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
   channel_data *chand = elem->channel_data;
   grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
                                            chand->interested_parties);
+  call_or_error coe = get_call_or_error(calld);
   if (calld->connected_subchannel == NULL) {
     // Failed to create subchannel.
-    GRPC_ERROR_UNREF(calld->error);
-    calld->error = error == GRPC_ERROR_NONE
-                       ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                             "Call dropped by load balancing policy")
-                       : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+    grpc_error *failure =
+        error == GRPC_ERROR_NONE
+            ? GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                  "Call dropped by load balancing policy")
+            : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -1027,16 +1014,48 @@ static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx,
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG,
               "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
-              calld, grpc_error_string(calld->error));
+              calld, grpc_error_string(failure));
     }
-    waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
+    set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(failure)});
+    waiting_for_pick_batches_fail_locked(exec_ctx, elem, failure);
+  } else if (coe.error != GRPC_ERROR_NONE) {
+    /* already cancelled before subchannel became ready */
+    grpc_error *child_errors[] = {error, coe.error};
+    grpc_error *cancellation_error =
+        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+            "Cancelled before creating subchannel", child_errors,
+            GPR_ARRAY_SIZE(child_errors));
+    /* if due to deadline, attach the deadline exceeded status to the error */
+    if (gpr_time_cmp(calld->deadline, gpr_now(GPR_CLOCK_MONOTONIC)) < 0) {
+      cancellation_error =
+          grpc_error_set_int(cancellation_error, GRPC_ERROR_INT_GRPC_STATUS,
+                             GRPC_STATUS_DEADLINE_EXCEEDED);
+    }
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: cancelled before subchannel became ready: %s",
+              chand, calld, grpc_error_string(cancellation_error));
+    }
+    waiting_for_pick_batches_fail_locked(exec_ctx, elem, cancellation_error);
   } else {
     /* Create call on subchannel. */
     create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
   }
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
   GRPC_ERROR_UNREF(error);
 }
 
+static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
+  call_data *calld = elem->call_data;
+  grpc_subchannel_call *subchannel_call =
+      get_call_or_error(calld).subchannel_call;
+  if (subchannel_call == NULL) {
+    return NULL;
+  } else {
+    return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
+  }
+}
+
 /** Return true if subchannel is available immediately (in which case
     subchannel_ready_locked() should not be called), or false otherwise (in
     which case subchannel_ready_locked() should be called when the subchannel
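
Aside (not part of the diff): the restored cancellation path above chooses between CANCELLED and DEADLINE_EXCEEDED by comparing the call's deadline against the monotonic clock. A rough standalone equivalent of that comparison, with POSIX clock_gettime standing in for gpr_now (assumption: a monotonic clock is available on the platform):

    #include <stdio.h>
    #include <time.h>

    /* Returns 1 if a < b, comparing like gpr_time_cmp(a, b) < 0. */
    static int before(struct timespec a, struct timespec b) {
      return a.tv_sec < b.tv_sec ||
             (a.tv_sec == b.tv_sec && a.tv_nsec < b.tv_nsec);
    }

    int main(void) {
      struct timespec deadline = {0, 0}; /* long since expired */
      struct timespec now;
      clock_gettime(CLOCK_MONOTONIC, &now);
      /* A cancellation that arrives after the deadline has passed is
         surfaced as DEADLINE_EXCEEDED rather than plain CANCELLED. */
      printf("%s\n", before(deadline, now) ? "GRPC_STATUS_DEADLINE_EXCEEDED"
                                           : "GRPC_STATUS_CANCELLED");
      return 0;
    }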
@@ -1050,44 +1069,6 @@ typedef struct {
   grpc_closure closure;
 } pick_after_resolver_result_args;
 
-// Note: This runs under the client_channel combiner, but will NOT be
-// holding the call combiner.
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
-                                                     void *arg,
-                                                     grpc_error *error) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  // If we don't yet have a resolver result, then a closure for
-  // pick_after_resolver_result_done_locked() will have been added to
-  // chand->waiting_for_resolver_result_closures, and it may not be invoked
-  // until after this call has been destroyed.  We mark the operation as
-  // cancelled, so that when pick_after_resolver_result_done_locked()
-  // is called, it will be a no-op.  We also immediately invoke
-  // subchannel_ready_locked() to propagate the error back to the caller.
-  for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
-       closure != NULL; closure = closure->next_data.next) {
-    pick_after_resolver_result_args *args = closure->cb_arg;
-    if (!args->cancelled && args->elem == elem) {
-      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-        gpr_log(GPR_DEBUG,
-                "chand=%p calld=%p: "
-                "cancelling pick waiting for resolver result",
-                chand, calld);
-      }
-      args->cancelled = true;
-      // Note: Although we are not in the call combiner here, we are
-      // basically stealing the call combiner from the pending pick, so
-      // it's safe to call subchannel_ready_locked() here -- we are
-      // essentially calling it here instead of calling it in
-      // pick_after_resolver_result_done_locked().
-      subchannel_ready_locked(exec_ctx, elem,
-                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                                  "Pick cancelled", &error, 1));
-    }
-  }
-  GRPC_ERROR_UNREF(error);
-}
-
 static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
                                                    void *arg,
                                                    grpc_error *error) {
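
Aside (not part of the diff): the function removed above — like the version re-added after pick_after_resolver_result_start_locked further down — cancels a pending pick by walking chand->waiting_for_resolver_result_closures and flipping a cancelled flag so the eventual done callback becomes a no-op. Sketched in miniature; all types here are hypothetical stand-ins:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical miniature of a pending-pick waiter: the elem it belongs
       to, plus the cancelled flag that turns its callback into a no-op. */
    typedef struct waiter {
      void *elem;
      bool cancelled;
      struct waiter *next;
    } waiter;

    static void cancel_picks_for_elem(waiter *head, void *elem) {
      for (waiter *w = head; w != NULL; w = w->next) {
        if (!w->cancelled && w->elem == elem) {
          w->cancelled = true; /* done callback now sees cancelled args */
          printf("cancelled pending pick for elem %p\n", elem);
        }
      }
    }

    int main(void) {
      int elem_a, elem_b;
      waiter w2 = {&elem_b, false, NULL};
      waiter w1 = {&elem_a, false, &w2};
      cancel_picks_for_elem(&w1, &elem_b);
      return 0;
    }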
@@ -1098,24 +1079,21 @@ static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
       gpr_log(GPR_DEBUG, "call cancelled before resolver result");
     }
   } else {
-    grpc_call_element *elem = args->elem;
-    channel_data *chand = elem->channel_data;
-    call_data *calld = elem->call_data;
-    grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
-                                            NULL);
+    channel_data *chand = args->elem->channel_data;
+    call_data *calld = args->elem->call_data;
     if (error != GRPC_ERROR_NONE) {
       if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
         gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
                 chand, calld);
       }
-      subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+      subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_REF(error));
     } else {
       if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
         gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
                 chand, calld);
       }
-      if (pick_subchannel_locked(exec_ctx, elem)) {
-        subchannel_ready_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      if (pick_subchannel_locked(exec_ctx, args->elem)) {
+        subchannel_ready_locked(exec_ctx, args->elem, GRPC_ERROR_NONE);
       }
     }
   }
 }
@@ -1138,33 +1116,41 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
                     args, grpc_combiner_scheduler(chand->combiner));
   grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
                            &args->closure, GRPC_ERROR_NONE);
-  grpc_call_combiner_set_notify_on_cancel(
-      exec_ctx, calld->call_combiner,
-      GRPC_CLOSURE_INIT(&calld->cancel_closure,
-                        pick_after_resolver_result_cancel_locked, elem,
-                        grpc_combiner_scheduler(chand->combiner)));
 }
 
-// Note: This runs under the client_channel combiner, but will NOT be
-// holding the call combiner.
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error) {
-  grpc_call_element *elem = arg;
-  channel_data *chand = elem->channel_data;
-  call_data *calld = elem->call_data;
-  if (calld->lb_policy != NULL) {
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
-              chand, calld, calld->lb_policy);
-    }
-    grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
-                                      &calld->connected_subchannel,
-                                      GRPC_ERROR_REF(error));
-  }
-  GRPC_ERROR_UNREF(error);
-}
+static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
+                                                     grpc_call_element *elem,
+                                                     grpc_error *error) {
+  channel_data *chand = elem->channel_data;
+  call_data *calld = elem->call_data;
+  // If we don't yet have a resolver result, then a closure for
+  // pick_after_resolver_result_done_locked() will have been added to
+  // chand->waiting_for_resolver_result_closures, and it may not be invoked
+  // until after this call has been destroyed.  We mark the operation as
+  // cancelled, so that when pick_after_resolver_result_done_locked()
+  // is called, it will be a no-op.  We also immediately invoke
+  // subchannel_ready_locked() to propagate the error back to the caller.
+  for (grpc_closure *closure = chand->waiting_for_resolver_result_closures.head;
+       closure != NULL; closure = closure->next_data.next) {
+    pick_after_resolver_result_args *args = closure->cb_arg;
+    if (!args->cancelled && args->elem == elem) {
+      if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+        gpr_log(GPR_DEBUG,
+                "chand=%p calld=%p: "
+                "cancelling pick waiting for resolver result",
+                chand, calld);
+      }
+      args->cancelled = true;
+      subchannel_ready_locked(exec_ctx, elem,
+                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                  "Pick cancelled", &error, 1));
+    }
+  }
+  GRPC_ERROR_UNREF(error);
+}
 
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
-// Unrefs the LB policy and invokes subchannel_ready_locked().
+// Unrefs the LB policy after invoking subchannel_ready_locked().
 static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
   grpc_call_element *elem = arg;
@@ -1174,7 +1160,6 @@ static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously",
             chand, calld);
   }
-  grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner, NULL);
   GPR_ASSERT(calld->lb_policy != NULL);
   GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
   calld->lb_policy = NULL;
@@ -1209,15 +1194,24 @@ static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
     calld->lb_policy = NULL;
-  } else {
-    grpc_call_combiner_set_notify_on_cancel(
-        exec_ctx, calld->call_combiner,
-        GRPC_CLOSURE_INIT(&calld->cancel_closure, pick_callback_cancel_locked,
-                          elem, grpc_combiner_scheduler(chand->combiner)));
   }
   return pick_done;
 }
 
+static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx,
+                                        grpc_call_element *elem,
+                                        grpc_error *error) {
+  channel_data *chand = elem->channel_data;
+  call_data *calld = elem->call_data;
+  GPR_ASSERT(calld->lb_policy != NULL);
+  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+    gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
+            chand, calld, calld->lb_policy);
+  }
+  grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
+                                    &calld->connected_subchannel, error);
+}
+
 static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
                                    grpc_call_element *elem) {
   GPR_TIMER_BEGIN("pick_subchannel", 0);
@@ -1230,7 +1224,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
   // Otherwise, if the service config specified a value for this
   // method, use that.
   uint32_t initial_metadata_flags =
-      calld->initial_metadata_batch->payload->send_initial_metadata
+      calld->initial_metadata_payload->send_initial_metadata
           .send_initial_metadata_flags;
   const bool wait_for_ready_set_from_api =
       initial_metadata_flags &
@@ -1247,7 +1241,7 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
     }
   }
   const grpc_lb_policy_pick_args inputs = {
-      calld->initial_metadata_batch->payload->send_initial_metadata
+      calld->initial_metadata_payload->send_initial_metadata
           .send_initial_metadata,
       initial_metadata_flags, &calld->lb_token_mdelem};
   pick_done = pick_callback_start_locked(exec_ctx, elem, &inputs);
@@ -1264,33 +1258,91 @@ static bool pick_subchannel_locked(grpc_exec_ctx *exec_ctx,
   return pick_done;
 }
 
-static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                              grpc_error *error_ignored) {
-  GPR_TIMER_BEGIN("start_pick_locked", 0);
-  grpc_call_element *elem = (grpc_call_element *)arg;
-  call_data *calld = (call_data *)elem->call_data;
-  channel_data *chand = (channel_data *)elem->channel_data;
-  GPR_ASSERT(calld->connected_subchannel == NULL);
-  if (pick_subchannel_locked(exec_ctx, elem)) {
-    // Pick was returned synchronously.
-    if (calld->connected_subchannel == NULL) {
-      GRPC_ERROR_UNREF(calld->error);
-      calld->error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-          "Call dropped by load balancing policy");
-      waiting_for_pick_batches_fail(exec_ctx, elem,
-                                    GRPC_ERROR_REF(calld->error));
-    } else {
-      // Create subchannel call.
-      create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
-    }
-  } else {
-    // Pick will be done asynchronously.  Add the call's polling entity to
-    // the channel's interested_parties, so that I/O for the resolver
-    // and LB policy can be done under it.
-    grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
-                                           chand->interested_parties);
-  }
-  GPR_TIMER_END("start_pick_locked", 0);
-}
+static void start_transport_stream_op_batch_locked(grpc_exec_ctx *exec_ctx,
+                                                   void *arg,
+                                                   grpc_error *error_ignored) {
+  GPR_TIMER_BEGIN("start_transport_stream_op_batch_locked", 0);
+  grpc_transport_stream_op_batch *batch = arg;
+  grpc_call_element *elem = batch->handler_private.extra_arg;
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
+  /* need to recheck that another thread hasn't set the call */
+  call_or_error coe = get_call_or_error(calld);
+  if (coe.error != GRPC_ERROR_NONE) {
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+              chand, calld, grpc_error_string(coe.error));
+    }
+    grpc_transport_stream_op_batch_finish_with_failure(
+        exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+    goto done;
+  }
+  if (coe.subchannel_call != NULL) {
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG,
+              "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
+              calld, coe.subchannel_call);
+    }
+    grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
+    goto done;
+  }
+  // Add to waiting-for-pick list.  If we succeed in getting a
+  // subchannel call below, we'll handle this batch (along with any
+  // other waiting batches) in waiting_for_pick_batches_resume_locked().
+  waiting_for_pick_batches_add_locked(calld, batch);
+  // If this is a cancellation, cancel the pending pick (if any) and
+  // fail any pending batches.
+  if (batch->cancel_stream) {
+    grpc_error *error = batch->payload->cancel_stream.cancel_error;
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
+              calld, grpc_error_string(error));
+    }
+    /* Stash a copy of cancel_error in our call data, so that we can use
+       it for subsequent operations.  This ensures that if the call is
+       cancelled before any batches are passed down (e.g., if the deadline
+       is in the past when the call starts), we can return the right
+       error to the caller when the first batch does get passed down. */
+    set_call_or_error(calld, (call_or_error){.error = GRPC_ERROR_REF(error)});
+    if (calld->lb_policy != NULL) {
+      pick_callback_cancel_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    } else {
+      pick_after_resolver_result_cancel_locked(exec_ctx, elem,
+                                               GRPC_ERROR_REF(error));
+    }
+    waiting_for_pick_batches_fail_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    goto done;
+  }
+  /* if we don't have a subchannel, try to get one */
+  if (batch->send_initial_metadata) {
+    GPR_ASSERT(calld->connected_subchannel == NULL);
+    calld->initial_metadata_payload = batch->payload;
+    GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
+    /* If a subchannel is not available immediately, the polling entity from
+       call_data should be provided to channel_data's interested_parties, so
+       that IO of the lb_policy and resolver could be done under it. */
+    if (pick_subchannel_locked(exec_ctx, elem)) {
+      // Pick was returned synchronously.
+      GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
+      if (calld->connected_subchannel == NULL) {
+        grpc_error *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+            "Call dropped by load balancing policy");
+        set_call_or_error(calld,
+                          (call_or_error){.error = GRPC_ERROR_REF(error)});
+        waiting_for_pick_batches_fail_locked(exec_ctx, elem, error);
+      } else {
+        // Create subchannel call.
+        create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      }
+    } else {
+      grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+                                             chand->interested_parties);
+    }
+  }
+done:
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+                        "start_transport_stream_op_batch");
+  GPR_TIMER_END("start_transport_stream_op_batch_locked", 0);
+}
@@ -1313,49 +1365,27 @@ static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
                      GRPC_ERROR_REF(error));
 }
 
+/* The logic here is fairly complicated, due to (a) the fact that we
+   need to handle the case where we receive the send op before the
+   initial metadata op, and (b) the need for efficiency, especially in
+   the streaming case.
+
+   We use double-checked locking to initially see if initialization has been
+   performed. If it has not, we acquire the combiner and perform initialization.
+   If it has, we proceed on the fast path. */
 static void cc_start_transport_stream_op_batch(
     grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     grpc_transport_stream_op_batch *batch) {
   call_data *calld = elem->call_data;
   channel_data *chand = elem->channel_data;
-  if (GRPC_TRACER_ON(grpc_client_channel_trace) ||
-      GRPC_TRACER_ON(grpc_trace_channel)) {
-    grpc_call_log_op(GPR_INFO, elem, batch);
-  }
   if (chand->deadline_checking_enabled) {
     grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
                                                                batch);
   }
-  GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
-  // If we've previously been cancelled, immediately fail any new batches.
-  if (calld->error != GRPC_ERROR_NONE) {
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
-              chand, calld, grpc_error_string(calld->error));
-    }
-    grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
-    goto done;
-  }
-  if (batch->cancel_stream) {
-    // Stash a copy of cancel_error in our call data, so that we can use
-    // it for subsequent operations.  This ensures that if the call is
-    // cancelled before any batches are passed down (e.g., if the deadline
-    // is in the past when the call starts), we can return the right
-    // error to the caller when the first batch does get passed down.
-    GRPC_ERROR_UNREF(calld->error);
-    calld->error = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand,
-              calld, grpc_error_string(calld->error));
-    }
-    // If we have a subchannel call, send the cancellation batch down.
-    // Otherwise, fail all pending batches.
-    if (calld->subchannel_call != NULL) {
-      grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
-    } else {
-      waiting_for_pick_batches_add(calld, batch);
-      waiting_for_pick_batches_fail(exec_ctx, elem,
-                                    GRPC_ERROR_REF(calld->error));
-    }
-    goto done;
-  }
   // Intercept on_complete for recv_trailing_metadata so that we can
   // check retry throttle status.
   if (batch->recv_trailing_metadata) {
@@ -1365,43 +1395,38 @@ static void cc_start_transport_stream_op_batch(
                       grpc_schedule_on_exec_ctx);
     batch->on_complete = &calld->on_complete;
   }
-  // Check if we've already gotten a subchannel call.
-  // Note that once we have completed the pick, we do not need to enter
-  // the channel combiner, which is more efficient (especially for
-  // streaming calls).
-  if (calld->subchannel_call != NULL) {
+  /* try to (atomically) get the call */
+  call_or_error coe = get_call_or_error(calld);
+  GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
+  if (coe.error != GRPC_ERROR_NONE) {
+    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+      gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s",
+              chand, calld, grpc_error_string(coe.error));
+    }
+    grpc_transport_stream_op_batch_finish_with_failure(
+        exec_ctx, batch, GRPC_ERROR_REF(coe.error));
+    goto done;
+  }
+  if (coe.subchannel_call != NULL) {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG,
               "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
-              calld, calld->subchannel_call);
+              calld, coe.subchannel_call);
     }
-    grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+    grpc_subchannel_call_process_op(exec_ctx, coe.subchannel_call, batch);
     goto done;
   }
-  // We do not yet have a subchannel call.
-  // Add the batch to the waiting-for-pick list.
-  waiting_for_pick_batches_add(calld, batch);
-  // For batches containing a send_initial_metadata op, enter the channel
-  // combiner to start a pick.
-  if (batch->send_initial_metadata) {
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
-    }
-    GRPC_CLOSURE_SCHED(
-        exec_ctx,
-        GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
-                          elem, grpc_combiner_scheduler(chand->combiner)),
-        GRPC_ERROR_NONE);
-  } else {
-    // For all other batches, release the call combiner.
-    if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
-      gpr_log(GPR_DEBUG,
-              "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
-              calld);
-    }
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
-                            "batch does not include send_initial_metadata");
-  }
+  /* we failed; lock and figure out what to do */
+  if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
+    gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering combiner", chand, calld);
+  }
+  GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op_batch");
+  batch->handler_private.extra_arg = elem;
+  GRPC_CLOSURE_SCHED(
+      exec_ctx, GRPC_CLOSURE_INIT(&batch->handler_private.closure,
+                                  start_transport_stream_op_batch_locked, batch,
+                                  grpc_combiner_scheduler(chand->combiner)),
+      GRPC_ERROR_NONE);
 done:
   GPR_TIMER_END("cc_start_transport_stream_op_batch", 0);
 }
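
Aside (not part of the diff): the comment block reinstated two hunks above describes the double-checked pattern this hunk restores — read the atomic once on the fast path, and only fall back to the serialized combiner when no call has been published yet. A compact C11 sketch of that fast/slow split (subchannel_call here is a dummy stand-in, not the real type):

    #include <stdatomic.h>
    #include <stdio.h>

    typedef struct { int id; } subchannel_call; /* dummy stand-in */

    typedef struct {
      _Atomic(subchannel_call *) call; /* published once, read many times */
    } calld_sketch;

    /* Fast path: one acquire load, no combiner.  This is what keeps the
       per-message cost of a streaming call cheap once the pick is done. */
    static subchannel_call *fast_path(calld_sketch *cd) {
      return atomic_load_explicit(&cd->call, memory_order_acquire);
    }

    /* Slow path, conceptually run under the combiner: re-check, publish. */
    static void slow_path(calld_sketch *cd, subchannel_call *created) {
      if (atomic_load_explicit(&cd->call, memory_order_relaxed) == NULL)
        atomic_store_explicit(&cd->call, created, memory_order_release);
    }

    int main(void) {
      calld_sketch cd = {NULL};
      subchannel_call sc = {42};
      if (fast_path(&cd) == NULL) slow_path(&cd, &sc); /* first batch */
      printf("subsequent batches go to call %d\n", fast_path(&cd)->id);
      return 0;
    }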
@@ -1416,11 +1441,10 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
   calld->path = grpc_slice_ref_internal(args->path);
   calld->call_start_time = args->start_time;
   calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+  calld->owning_call = args->call_stack;
   calld->arena = args->arena;
-  calld->call_combiner = args->call_combiner;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
-                             args->call_combiner, calld->deadline);
+    grpc_deadline_state_init(exec_ctx, elem, args->call_stack, calld->deadline);
   }
   return GRPC_ERROR_NONE;
 }
@@ -1439,12 +1463,13 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
   if (calld->method_params != NULL) {
     method_parameters_unref(calld->method_params);
   }
-  GRPC_ERROR_UNREF(calld->error);
-  if (calld->subchannel_call != NULL) {
-    grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
+  call_or_error coe = get_call_or_error(calld);
+  GRPC_ERROR_UNREF(coe.error);
+  if (coe.subchannel_call != NULL) {
+    grpc_subchannel_call_set_cleanup_closure(coe.subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = NULL;
-    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
+    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, coe.subchannel_call,
                                "client_channel_destroy_call");
   }
   GPR_ASSERT(calld->lb_policy == NULL);
@@ -1483,6 +1508,7 @@ const grpc_channel_filter grpc_client_channel_filter = {
     sizeof(channel_data),
     cc_init_channel_elem,
     cc_destroy_channel_elem,
+    cc_get_peer,
     cc_get_channel_info,
     "client-channel",
 };