|
|
@@ -44,13 +44,14 @@
 #include <grpc/support/useful.h>
 
 #include "src/core/ext/client_channel/http_connect_handshaker.h"
-#include "src/core/ext/client_channel/http_proxy.h"
 #include "src/core/ext/client_channel/lb_policy_registry.h"
+#include "src/core/ext/client_channel/proxy_mapper_registry.h"
 #include "src/core/ext/client_channel/resolver_registry.h"
 #include "src/core/ext/client_channel/subchannel.h"
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/connected_channel.h"
 #include "src/core/lib/channel/deadline_filter.h"
+#include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/profiling/timers.h"
@@ -153,10 +154,6 @@ static void *method_parameters_create_from_json(const grpc_json *json) {
  */
 
 typedef struct client_channel_channel_data {
-  /** server name */
-  char *server_name;
-  /** HTTP CONNECT proxy to use, if any */
-  char *proxy_name;
   /** resolver for this channel */
   grpc_resolver *resolver;
   /** have we started resolving this channel */
@@ -164,13 +161,10 @@ typedef struct client_channel_channel_data {
   /** client channel factory */
   grpc_client_channel_factory *client_channel_factory;
 
-  /** mutex protecting all variables below in this data structure */
-  gpr_mu mu;
+  /** combiner protecting all variables below in this data structure */
+  grpc_combiner *combiner;
   /** currently active load balancer */
-  char *lb_policy_name;
   grpc_lb_policy *lb_policy;
-  /** service config in JSON form */
-  char *service_config_json;
   /** maps method names to method_parameters structs */
   grpc_slice_hash_table *method_params_table;
   /** incoming resolver result - set by resolver.next() */
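Note on the hunk above: this is the heart of the change. The channel-wide
gpr_mu becomes a grpc_combiner, which serializes closures instead of blocking
threads; everywhere below, "take the lock" turns into "schedule a *_locked
closure on the combiner", so the _locked suffix now means "runs exclusively on
the combiner". A minimal sketch of the pattern, using only APIs that appear in
this patch (do_work/do_work_locked are hypothetical names, not part of the
change):

    /* Runs with exclusive access to combiner-guarded state: the combiner
       executes at most one of its closures at a time. */
    static void do_work_locked(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
      channel_data *chand = arg;
      /* safe to touch chand->lb_policy, chand->resolver, ... here */
    }

    /* Callable from any thread; replaces gpr_mu_lock(&chand->mu). */
    static void do_work(grpc_exec_ctx *exec_ctx, channel_data *chand) {
      grpc_closure_sched(
          exec_ctx,
          grpc_closure_create(do_work_locked, chand,
                              grpc_combiner_scheduler(chand->combiner, false)),
          GRPC_ERROR_NONE);
    }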
@@ -187,6 +181,13 @@ typedef struct client_channel_channel_data {
   grpc_channel_stack *owning_stack;
   /** interested parties (owned) */
   grpc_pollset_set *interested_parties;
+
+  /* the following properties are guarded by a mutex since APIs require them
+     to be instantaneously available */
+  gpr_mu info_mu;
+  char *info_lb_policy_name;
+  /** service config in JSON form */
+  char *info_service_config_json;
 } channel_data;
 
 /** We create one watcher for each new lb_policy that is returned from a
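Why a mutex survives here: the channel-info APIs are synchronous, so a caller
cannot wait for a combiner closure to run. The duplicated info_lb_policy_name /
info_service_config_json strings added above are the only state left under a
conventional lock (info_mu), which cc_get_channel_info further down holds just
long enough to copy a pointer.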
@@ -222,32 +223,23 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
 }
 
 static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
-                                              lb_policy_connectivity_watcher *w,
-                                              grpc_error *error) {
+                                              void *arg, grpc_error *error) {
+  lb_policy_connectivity_watcher *w = arg;
   grpc_connectivity_state publish_state = w->state;
-  /* check if the notification is for a stale policy */
-  if (w->lb_policy != w->chand->lb_policy) return;
-
-  if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
-    publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-    grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
-    GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
-    w->chand->lb_policy = NULL;
-  }
-  set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
-                                        GRPC_ERROR_REF(error), "lb_changed");
-  if (w->state != GRPC_CHANNEL_SHUTDOWN) {
-    watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
-  }
-}
-
-static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
-                                       grpc_error *error) {
-  lb_policy_connectivity_watcher *w = arg;
-
-  gpr_mu_lock(&w->chand->mu);
-  on_lb_policy_state_changed_locked(exec_ctx, w, error);
-  gpr_mu_unlock(&w->chand->mu);
-
+  /* check if the notification is for the latest policy */
+  if (w->lb_policy == w->chand->lb_policy) {
+    if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
+      publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
+      grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
+      GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
+      w->chand->lb_policy = NULL;
+    }
+    set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
+                                          GRPC_ERROR_REF(error), "lb_changed");
+    if (w->state != GRPC_CHANNEL_SHUTDOWN) {
+      watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
+    }
+  }
   GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
   gpr_free(w);
 }
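With the watcher callback scheduled straight onto the combiner (see the
grpc_combiner_scheduler change in the next hunk), the old
on_lb_policy_state_changed trampoline, which existed only to take and drop
chand->mu around the _locked body, disappears. That forces one control-flow
change in the merged function above: the early return for a stale policy
becomes an if (w->lb_policy == w->chand->lb_policy) wrapper, because the
unref-and-free tail that used to live in the trampoline must now run on every
invocation.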
@@ -260,16 +252,16 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
 
   w->chand = chand;
-  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
-                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed_locked, w,
+                    grpc_combiner_scheduler(chand->combiner, false));
   w->state = current_state;
   w->lb_policy = lb_policy;
   grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
                                         &w->on_changed);
 }
 
-static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
-                                       grpc_error *error) {
+static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
+                                              void *arg, grpc_error *error) {
   channel_data *chand = arg;
   char *lb_policy_name = NULL;
   grpc_lb_policy *lb_policy = NULL;
@@ -317,17 +309,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
     // Use pick_first if nothing was specified and we didn't select grpclb
     // above.
     if (lb_policy_name == NULL) lb_policy_name = "pick_first";
-    // If using a proxy, add channel arg for server in HTTP CONNECT request.
-    if (chand->proxy_name != NULL) {
-      grpc_arg new_arg;
-      new_arg.key = GRPC_ARG_HTTP_CONNECT_SERVER;
-      new_arg.type = GRPC_ARG_STRING;
-      new_arg.value.string = chand->server_name;
-      grpc_channel_args *tmp_args = chand->resolver_result;
-      chand->resolver_result =
-          grpc_channel_args_copy_and_add(chand->resolver_result, &new_arg, 1);
-      grpc_channel_args_destroy(exec_ctx, tmp_args);
-    }
     // Instantiate LB policy.
     grpc_lb_policy_args lb_policy_args;
     lb_policy_args.args = chand->resolver_result;
@@ -368,17 +349,18 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                          chand->interested_parties);
   }
 
-  gpr_mu_lock(&chand->mu);
+  gpr_mu_lock(&chand->info_mu);
   if (lb_policy_name != NULL) {
-    gpr_free(chand->lb_policy_name);
-    chand->lb_policy_name = lb_policy_name;
+    gpr_free(chand->info_lb_policy_name);
+    chand->info_lb_policy_name = lb_policy_name;
   }
   old_lb_policy = chand->lb_policy;
   chand->lb_policy = lb_policy;
   if (service_config_json != NULL) {
-    gpr_free(chand->service_config_json);
-    chand->service_config_json = service_config_json;
+    gpr_free(chand->info_service_config_json);
+    chand->info_service_config_json = service_config_json;
   }
+  gpr_mu_unlock(&chand->info_mu);
   if (chand->method_params_table != NULL) {
     grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
   }
@@ -406,7 +388,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
       GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
       grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
                          &chand->on_resolver_result_changed);
-      gpr_mu_unlock(&chand->mu);
     } else {
       if (chand->resolver != NULL) {
         grpc_resolver_shutdown(exec_ctx, chand->resolver);
@@ -419,7 +400,6 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
         GRPC_ERROR_CREATE_REFERENCING("Got config after disconnection", refs,
                                       GPR_ARRAY_SIZE(refs)),
         "resolver_gone");
-    gpr_mu_unlock(&chand->mu);
   }
 
   if (exit_idle) {
@@ -441,20 +421,12 @@ static void on_resolver_result_changed(grpc_exec_ctx *exec_ctx, void *arg,
   GRPC_ERROR_UNREF(state_error);
 }
 
-static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
-                                  grpc_channel_element *elem,
-                                  grpc_transport_op *op) {
+static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                      grpc_error *error_ignored) {
+  grpc_transport_op *op = arg;
+  grpc_channel_element *elem = op->transport_private.args[0];
   channel_data *chand = elem->channel_data;
 
-  grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
-
-  GPR_ASSERT(op->set_accept_stream == false);
-  if (op->bind_pollset != NULL) {
-    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
-                                 op->bind_pollset);
-  }
-
-  gpr_mu_lock(&chand->mu);
   if (op->on_connectivity_state_change != NULL) {
     grpc_connectivity_state_notify_on_state_change(
         exec_ctx, &chand->state_tracker, op->connectivity_state,
@@ -497,25 +469,48 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
     }
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
-  gpr_mu_unlock(&chand->mu);
+  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
+
+  grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
 }
 
+static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
+                                  grpc_channel_element *elem,
+                                  grpc_transport_op *op) {
+  channel_data *chand = elem->channel_data;
+
+  GPR_ASSERT(op->set_accept_stream == false);
+  if (op->bind_pollset != NULL) {
+    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
+                                 op->bind_pollset);
+  }
+
+  op->transport_private.args[0] = elem;
+  GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_init(
+                    &op->transport_private.closure, start_transport_op_locked,
+                    op, grpc_combiner_scheduler(chand->combiner, false)),
+      GRPC_ERROR_NONE);
+}
+
 static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
                                 grpc_channel_element *elem,
                                 const grpc_channel_info *info) {
   channel_data *chand = elem->channel_data;
-  gpr_mu_lock(&chand->mu);
+  gpr_mu_lock(&chand->info_mu);
   if (info->lb_policy_name != NULL) {
-    *info->lb_policy_name = chand->lb_policy_name == NULL
-                                ? NULL
-                                : gpr_strdup(chand->lb_policy_name);
+    *info->lb_policy_name = chand->info_lb_policy_name == NULL
+                                ? NULL
+                                : gpr_strdup(chand->info_lb_policy_name);
   }
   if (info->service_config_json != NULL) {
-    *info->service_config_json = chand->service_config_json == NULL
-                                     ? NULL
-                                     : gpr_strdup(chand->service_config_json);
+    *info->service_config_json =
+        chand->info_service_config_json == NULL
+            ? NULL
+            : gpr_strdup(chand->info_service_config_json);
   }
-  gpr_mu_unlock(&chand->mu);
+  gpr_mu_unlock(&chand->info_mu);
 }
 
 /* Constructor for channel_data */
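The transport-op path now splits into a thin cc_start_transport_op, callable
from any thread, and start_transport_op_locked, which runs on the combiner.
The hop allocates nothing: the channel element is stashed in
op->transport_private.args[0] and the op rides in its own pre-allocated
op->transport_private.closure. This also explains why the on_consumed
notification moves from the top of the old function to the bottom of the
locked one: the op must stay alive until the combiner callback has finished
reading it.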
@@ -527,11 +522,12 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(args->is_last);
   GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
   // Initialize data members.
-  gpr_mu_init(&chand->mu);
+  chand->combiner = grpc_combiner_create(NULL);
+  gpr_mu_init(&chand->info_mu);
   chand->owning_stack = args->channel_stack;
   grpc_closure_init(&chand->on_resolver_result_changed,
-                    on_resolver_result_changed, chand,
-                    grpc_schedule_on_exec_ctx);
+                    on_resolver_result_changed_locked, chand,
+                    grpc_combiner_scheduler(chand->combiner, false));
   chand->interested_parties = grpc_pollset_set_create();
   grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                                "client_channel");
@@ -542,24 +538,21 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
   GPR_ASSERT(arg->type == GRPC_ARG_POINTER);
   grpc_client_channel_factory_ref(arg->value.pointer.p);
   chand->client_channel_factory = arg->value.pointer.p;
-  // Instantiate resolver.
+  // Get server name to resolve, using proxy mapper if needed.
   arg = grpc_channel_args_find(args->channel_args, GRPC_ARG_SERVER_URI);
   GPR_ASSERT(arg != NULL);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri *uri = grpc_uri_parse(arg->value.string, true);
-  if (uri == NULL) return GRPC_ERROR_CREATE("cannot parse server URI");
-  if (uri->path[0] == '\0') {
-    grpc_uri_destroy(uri);
-    return GRPC_ERROR_CREATE("server URI is missing path");
-  }
-  chand->server_name =
-      gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
-  grpc_uri_destroy(uri);
-  chand->proxy_name = grpc_get_http_proxy_server();
-  char *name_to_resolve =
-      chand->proxy_name == NULL ? arg->value.string : chand->proxy_name;
+  char *proxy_name = NULL;
+  grpc_channel_args *new_args = NULL;
+  grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
+                              &proxy_name, &new_args);
+  // Instantiate resolver.
   chand->resolver = grpc_resolver_create(
-      exec_ctx, name_to_resolve, args->channel_args, chand->interested_parties);
+      exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
+      new_args != NULL ? new_args : args->channel_args,
+      chand->interested_parties);
+  if (proxy_name != NULL) gpr_free(proxy_name);
+  if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
   if (chand->resolver == NULL) {
     return GRPC_ERROR_CREATE("resolver creation failed");
  }
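This hunk folds in the proxy-mapper change alongside the combiner work.
Rather than hard-coding grpc_get_http_proxy_server() and stashing
server_name/proxy_name on the channel for a later HTTP CONNECT fixup, the
constructor now consults the pluggable proxy-mapper registry once, up front.
As used here, grpc_proxy_mappers_map_name leaves its out-parameters NULL when
no mapper fires, or hands back a replacement name to resolve and/or rewritten
channel args, both owned by the caller; hence the two conditional frees
immediately after grpc_resolver_create.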
@@ -570,8 +563,6 @@ static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
 static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_channel_element *elem) {
   channel_data *chand = elem->channel_data;
-  gpr_free(chand->server_name);
-  gpr_free(chand->proxy_name);
   if (chand->resolver != NULL) {
     grpc_resolver_shutdown(exec_ctx, chand->resolver);
     GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
@@ -585,14 +576,15 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
                                      chand->interested_parties);
     GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
   }
-  gpr_free(chand->lb_policy_name);
-  gpr_free(chand->service_config_json);
+  gpr_free(chand->info_lb_policy_name);
+  gpr_free(chand->info_service_config_json);
   if (chand->method_params_table != NULL) {
     grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
   }
   grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
-  grpc_pollset_set_destroy(chand->interested_parties);
-  gpr_mu_destroy(&chand->mu);
+  grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
+  GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
+  gpr_mu_destroy(&chand->info_mu);
 }
 
 /*************************************************************************
@@ -635,8 +627,6 @@ typedef struct client_channel_call_data {
      grpc_subchannel_call */
   gpr_atm subchannel_call;
 
-  gpr_mu mu;
-
   subchannel_creation_phase creation_phase;
   grpc_connected_subchannel *connected_subchannel;
   grpc_polling_entity *pollent;
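The per-call gpr_mu gets the same treatment as the channel mutex: every
mutation of call state is funneled through the channel's combiner from here
on, so the field, together with its gpr_mu_init/gpr_mu_destroy in the call
constructor and destructor further down, can simply be deleted.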
@@ -681,52 +671,32 @@ static void fail_locked(grpc_exec_ctx *exec_ctx, call_data *calld,
   GRPC_ERROR_UNREF(error);
 }
 
-typedef struct {
-  grpc_transport_stream_op **ops;
-  size_t nops;
-  grpc_subchannel_call *call;
-} retry_ops_args;
-
-static void retry_ops(grpc_exec_ctx *exec_ctx, void *args, grpc_error *error) {
-  retry_ops_args *a = args;
-  size_t i;
-  for (i = 0; i < a->nops; i++) {
-    grpc_subchannel_call_process_op(exec_ctx, a->call, a->ops[i]);
-  }
-  GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, a->call, "retry_ops");
-  gpr_free(a->ops);
-  gpr_free(a);
-}
-
 static void retry_waiting_locked(grpc_exec_ctx *exec_ctx, call_data *calld) {
   if (calld->waiting_ops_count == 0) {
     return;
   }
 
-  retry_ops_args *a = gpr_malloc(sizeof(*a));
-  a->ops = calld->waiting_ops;
-  a->nops = calld->waiting_ops_count;
-  a->call = GET_CALL(calld);
-  if (a->call == CANCELLED_CALL) {
-    gpr_free(a);
+  grpc_subchannel_call *call = GET_CALL(calld);
+  grpc_transport_stream_op **ops = calld->waiting_ops;
+  size_t nops = calld->waiting_ops_count;
+  if (call == CANCELLED_CALL) {
     fail_locked(exec_ctx, calld, GRPC_ERROR_CANCELLED);
     return;
   }
   calld->waiting_ops = NULL;
   calld->waiting_ops_count = 0;
   calld->waiting_ops_capacity = 0;
-  GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
-  grpc_closure_sched(
-      exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
-      GRPC_ERROR_NONE);
+  for (size_t i = 0; i < nops; i++) {
+    grpc_subchannel_call_process_op(exec_ctx, call, ops[i]);
+  }
+  gpr_free(ops);
 }
 
-static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
-                             grpc_error *error) {
+static void subchannel_ready_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                    grpc_error *error) {
   grpc_call_element *elem = arg;
   call_data *calld = elem->call_data;
   channel_data *chand = elem->channel_data;
-  gpr_mu_lock(&calld->mu);
   GPR_ASSERT(calld->creation_phase ==
              GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
   grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
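A simplification falls out of the combiner here: the old retry_ops closure and
its heap-allocated retry_ops_args existed only because queued ops could not be
replayed while calld->mu was held. With no lock to deadlock on,
retry_waiting_locked now calls grpc_subchannel_call_process_op inline, saving
an allocation, a subchannel-call ref/unref, and a closure round-trip per
retry.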
@@ -762,7 +732,6 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             (gpr_atm)(uintptr_t)subchannel_call);
     retry_waiting_locked(exec_ctx, calld);
   }
-  gpr_mu_unlock(&calld->mu);
   GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
 }
 
@@ -788,37 +757,35 @@ typedef struct {
 /** Return true if subchannel is available immediately (in which case on_ready
     should not be called), or false otherwise (in which case on_ready should be
     called when the subchannel is available). */
-static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-                            grpc_metadata_batch *initial_metadata,
-                            uint32_t initial_metadata_flags,
-                            grpc_connected_subchannel **connected_subchannel,
-                            grpc_closure *on_ready, grpc_error *error);
+static bool pick_subchannel_locked(
+    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+    grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
+    grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
+    grpc_error *error);
 
-static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
-                             grpc_error *error) {
+static void continue_picking_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                    grpc_error *error) {
   continue_picking_args *cpa = arg;
   if (cpa->connected_subchannel == NULL) {
     /* cancelled, do nothing */
   } else if (error != GRPC_ERROR_NONE) {
     grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
   } else {
-    call_data *calld = cpa->elem->call_data;
-    gpr_mu_lock(&calld->mu);
-    if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
-                        cpa->initial_metadata_flags, cpa->connected_subchannel,
-                        cpa->on_ready, GRPC_ERROR_NONE)) {
+    if (pick_subchannel_locked(exec_ctx, cpa->elem, cpa->initial_metadata,
+                               cpa->initial_metadata_flags,
+                               cpa->connected_subchannel, cpa->on_ready,
+                               GRPC_ERROR_NONE)) {
       grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
     }
-    gpr_mu_unlock(&calld->mu);
   }
   gpr_free(cpa);
 }
 
-static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-                            grpc_metadata_batch *initial_metadata,
-                            uint32_t initial_metadata_flags,
-                            grpc_connected_subchannel **connected_subchannel,
-                            grpc_closure *on_ready, grpc_error *error) {
+static bool pick_subchannel_locked(
+    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
+    grpc_metadata_batch *initial_metadata, uint32_t initial_metadata_flags,
+    grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready,
+    grpc_error *error) {
   GPR_TIMER_BEGIN("pick_subchannel", 0);
 
   channel_data *chand = elem->channel_data;
@@ -828,7 +795,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
 
   GPR_ASSERT(connected_subchannel);
 
-  gpr_mu_lock(&chand->mu);
   if (initial_metadata == NULL) {
     if (chand->lb_policy != NULL) {
       grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
@@ -844,7 +810,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
           GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       }
     }
-    gpr_mu_unlock(&chand->mu);
     GPR_TIMER_END("pick_subchannel", 0);
     GRPC_ERROR_UNREF(error);
     return true;
@@ -853,7 +818,6 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
   if (chand->lb_policy != NULL) {
     grpc_lb_policy *lb_policy = chand->lb_policy;
     GRPC_LB_POLICY_REF(lb_policy, "pick_subchannel");
-    gpr_mu_unlock(&chand->mu);
     // If the application explicitly set wait_for_ready, use that.
     // Otherwise, if the service config specified a value for this
     // method, use that.
@@ -892,88 +856,66 @@ static bool pick_subchannel(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
     cpa->connected_subchannel = connected_subchannel;
     cpa->on_ready = on_ready;
     cpa->elem = elem;
-    grpc_closure_init(&cpa->closure, continue_picking, cpa,
-                      grpc_schedule_on_exec_ctx);
+    grpc_closure_init(&cpa->closure, continue_picking_locked, cpa,
+                      grpc_combiner_scheduler(chand->combiner, true));
     grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                              GRPC_ERROR_NONE);
   } else {
     grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
   }
-  gpr_mu_unlock(&chand->mu);
 
   GPR_TIMER_END("pick_subchannel", 0);
   return false;
 }
 
-// The logic here is fairly complicated, due to (a) the fact that we
-// need to handle the case where we receive the send op before the
-// initial metadata op, and (b) the need for efficiency, especially in
-// the streaming case.
-// TODO(ctiller): Explain this more thoroughly.
-static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
-                                         grpc_call_element *elem,
-                                         grpc_transport_stream_op *op) {
-  call_data *calld = elem->call_data;
+static void start_transport_stream_op_locked_inner(grpc_exec_ctx *exec_ctx,
+                                                   grpc_transport_stream_op *op,
+                                                   grpc_call_element *elem) {
   channel_data *chand = elem->channel_data;
-  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
-  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
-  /* try to (atomically) get the call */
-  grpc_subchannel_call *call = GET_CALL(calld);
-  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
-  if (call == CANCELLED_CALL) {
-    grpc_transport_stream_op_finish_with_failure(
-        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
-    GPR_TIMER_END("cc_start_transport_stream_op", 0);
-    return;
-  }
-  if (call != NULL) {
-    grpc_subchannel_call_process_op(exec_ctx, call, op);
-    GPR_TIMER_END("cc_start_transport_stream_op", 0);
-    return;
-  }
-  /* we failed; lock and figure out what to do */
-  gpr_mu_lock(&calld->mu);
-retry:
+  call_data *calld = elem->call_data;
+  grpc_subchannel_call *call;
   /* need to recheck that another thread hasn't set the call */
   call = GET_CALL(calld);
   if (call == CANCELLED_CALL) {
-    gpr_mu_unlock(&calld->mu);
     grpc_transport_stream_op_finish_with_failure(
         exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
-    GPR_TIMER_END("cc_start_transport_stream_op", 0);
+    /* early out */
     return;
   }
   if (call != NULL) {
-    gpr_mu_unlock(&calld->mu);
     grpc_subchannel_call_process_op(exec_ctx, call, op);
-    GPR_TIMER_END("cc_start_transport_stream_op", 0);
+    /* early out */
    return;
   }
   /* if this is a cancellation, then we can raise our cancelled flag */
   if (op->cancel_error != GRPC_ERROR_NONE) {
     if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
                          (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
-      goto retry;
+      /* recurse to retry */
+      start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+      /* early out */
+      return;
     } else {
-      // Stash a copy of cancel_error in our call data, so that we can use
-      // it for subsequent operations.  This ensures that if the call is
-      // cancelled before any ops are passed down (e.g., if the deadline
-      // is in the past when the call starts), we can return the right
-      // error to the caller when the first op does get passed down.
+      /* Stash a copy of cancel_error in our call data, so that we can use
         it for subsequent operations.  This ensures that if the call is
         cancelled before any ops are passed down (e.g., if the deadline
         is in the past when the call starts), we can return the right
         error to the caller when the first op does get passed down. */
       calld->cancel_error = GRPC_ERROR_REF(op->cancel_error);
       switch (calld->creation_phase) {
         case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
           fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
           break;
         case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
-          pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
-                          NULL, GRPC_ERROR_REF(op->cancel_error));
+          pick_subchannel_locked(exec_ctx, elem, NULL, 0,
+                                 &calld->connected_subchannel, NULL,
+                                 GRPC_ERROR_REF(op->cancel_error));
           break;
       }
-      gpr_mu_unlock(&calld->mu);
       grpc_transport_stream_op_finish_with_failure(
           exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
-      GPR_TIMER_END("cc_start_transport_stream_op", 0);
+      /* early out */
       return;
     }
   }
@@ -982,16 +924,16 @@
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    grpc_closure_init(&calld->next_step, subchannel_ready, elem,
-                      grpc_schedule_on_exec_ctx);
+    grpc_closure_init(&calld->next_step, subchannel_ready_locked, elem,
+                      grpc_combiner_scheduler(chand->combiner, true));
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
     /* If a subchannel is not available immediately, the polling entity from
        call_data should be provided to channel_data's interested_parties, so
        that IO of the lb_policy and resolver could be done under it. */
-    if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
-                        op->send_initial_metadata_flags,
-                        &calld->connected_subchannel, &calld->next_step,
-                        GRPC_ERROR_NONE)) {
+    if (pick_subchannel_locked(exec_ctx, elem, op->send_initial_metadata,
+                               op->send_initial_metadata_flags,
+                               &calld->connected_subchannel, &calld->next_step,
+                               GRPC_ERROR_NONE)) {
       calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
       GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
     } else {
@@ -1014,31 +956,89 @@
       gpr_atm_rel_store(&calld->subchannel_call,
                         (gpr_atm)(uintptr_t)subchannel_call);
       retry_waiting_locked(exec_ctx, calld);
-      goto retry;
+      /* recurse to retry */
+      start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+      /* early out */
+      return;
     }
   /* nothing to be done but wait */
   add_waiting_locked(calld, op);
-  gpr_mu_unlock(&calld->mu);
 }
 
+static void cc_start_transport_stream_op_locked(grpc_exec_ctx *exec_ctx,
+                                                void *arg,
+                                                grpc_error *error_ignored) {
+  GPR_TIMER_BEGIN("cc_start_transport_stream_op_locked", 0);
+
+  grpc_transport_stream_op *op = arg;
+  grpc_call_element *elem = op->handler_private.args[0];
+  call_data *calld = elem->call_data;
+
+  start_transport_stream_op_locked_inner(exec_ctx, op, elem);
+
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+                        "start_transport_stream_op");
+  GPR_TIMER_END("cc_start_transport_stream_op_locked", 0);
+}
+
+/* The logic here is fairly complicated, due to (a) the fact that we
+   need to handle the case where we receive the send op before the
+   initial metadata op, and (b) the need for efficiency, especially in
+   the streaming case.
+
+   We use double-checked locking to initially see if initialization has been
+   performed. If it has not, we acquire the combiner and perform initialization.
+   If it has, we proceed on the fast path. */
+static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
+                                         grpc_call_element *elem,
+                                         grpc_transport_stream_op *op) {
+  call_data *calld = elem->call_data;
+  channel_data *chand = elem->channel_data;
+  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
+  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
+  /* try to (atomically) get the call */
+  grpc_subchannel_call *call = GET_CALL(calld);
+  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
+  if (call == CANCELLED_CALL) {
+    grpc_transport_stream_op_finish_with_failure(
+        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
+    GPR_TIMER_END("cc_start_transport_stream_op", 0);
+    /* early out */
+    return;
+  }
+  if (call != NULL) {
+    grpc_subchannel_call_process_op(exec_ctx, call, op);
+    GPR_TIMER_END("cc_start_transport_stream_op", 0);
+    /* early out */
+    return;
+  }
+  /* we failed; lock and figure out what to do */
+  GRPC_CALL_STACK_REF(calld->owning_call, "start_transport_stream_op");
+  op->handler_private.args[0] = elem;
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(&op->handler_private.closure,
+                        cc_start_transport_stream_op_locked, op,
+                        grpc_combiner_scheduler(chand->combiner, false)),
+      GRPC_ERROR_NONE);
+  GPR_TIMER_END("cc_start_transport_stream_op", 0);
+}
+
 // Gets data from the service config. Invoked when the resolver returns
 // its initial result.
-static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
-                                grpc_error *error) {
+static void read_service_config_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                       grpc_error *error) {
   grpc_call_element *elem = arg;
   channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
   // If this is an error, there's no point in looking at the service config.
   if (error == GRPC_ERROR_NONE) {
     // Get the method config table from channel data.
-    gpr_mu_lock(&chand->mu);
     grpc_slice_hash_table *method_params_table = NULL;
     if (chand->method_params_table != NULL) {
       method_params_table =
           grpc_slice_hash_table_ref(chand->method_params_table);
     }
-    gpr_mu_unlock(&chand->mu);
     // If the method config table was present, use it.
     if (method_params_table != NULL) {
       const method_parameters *method_params = grpc_method_config_table_get(
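The rewritten block comment above cc_start_transport_stream_op names the
scheme: double-checked dispatch. The first check is a bare atomic load with no
combiner involved; only an op that arrives before the subchannel call exists
pays for the combiner hop, where start_transport_stream_op_locked_inner
repeats the checks before mutating anything. Stripped to a skeleton, with
identifiers taken from this patch and cancellation handling and timers
omitted:

    grpc_subchannel_call *call = GET_CALL(calld); /* atomic load; fast path */
    if (call != NULL && call != CANCELLED_CALL) {
      /* call already exists: deliver the op directly, no serialization */
      grpc_subchannel_call_process_op(exec_ctx, call, op);
      return;
    }
    /* slow path: re-check and mutate shared state under the combiner
       (the real code first takes a GRPC_CALL_STACK_REF so calld outlives
       the hop) */
    op->handler_private.args[0] = elem;
    grpc_closure_sched(
        exec_ctx,
        grpc_closure_init(&op->handler_private.closure,
                          cc_start_transport_stream_op_locked, op,
                          grpc_combiner_scheduler(chand->combiner, false)),
        GRPC_ERROR_NONE);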
@@ -1048,7 +1048,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
           gpr_time_cmp(method_params->timeout, gpr_time_0(GPR_TIMESPAN)) != 0;
       if (have_method_timeout ||
           method_params->wait_for_ready != WAIT_FOR_READY_UNSET) {
-        gpr_mu_lock(&calld->mu);
         if (have_method_timeout) {
           const gpr_timespec per_method_deadline =
               gpr_time_add(calld->call_start_time, method_params->timeout);
@@ -1062,7 +1061,6 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
           calld->wait_for_ready_from_service_config =
               method_params->wait_for_ready;
         }
-        gpr_mu_unlock(&calld->mu);
       }
     }
     grpc_slice_hash_table_unref(exec_ctx, method_params_table);
@@ -1071,43 +1069,25 @@ static void read_service_config(grpc_exec_ctx *exec_ctx, void *arg,
   GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "read_service_config");
 }
 
-/* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
-                                     grpc_call_element *elem,
-                                     grpc_call_element_args *args) {
+static void initial_read_service_config_locked(grpc_exec_ctx *exec_ctx,
+                                               void *arg,
+                                               grpc_error *error_ignored) {
+  grpc_call_element *elem = arg;
   channel_data *chand = elem->channel_data;
   call_data *calld = elem->call_data;
-  // Initialize data members.
-  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
-  calld->path = grpc_slice_ref_internal(args->path);
-  calld->call_start_time = args->start_time;
-  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
-  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
-  calld->cancel_error = GRPC_ERROR_NONE;
-  gpr_atm_rel_store(&calld->subchannel_call, 0);
-  gpr_mu_init(&calld->mu);
-  calld->connected_subchannel = NULL;
-  calld->waiting_ops = NULL;
-  calld->waiting_ops_count = 0;
-  calld->waiting_ops_capacity = 0;
-  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
-  calld->owning_call = args->call_stack;
-  calld->pollent = NULL;
   // If the resolver has already returned results, then we can access
   // the service config parameters immediately. Otherwise, we need to
   // defer that work until the resolver returns an initial result.
   // TODO(roth): This code is almost but not quite identical to the code
   // in read_service_config() above. It would be nice to find a way to
   // combine them, to avoid having to maintain it twice.
-  gpr_mu_lock(&chand->mu);
   if (chand->lb_policy != NULL) {
     // We already have a resolver result, so check for service config.
     if (chand->method_params_table != NULL) {
       grpc_slice_hash_table *method_params_table =
           grpc_slice_hash_table_ref(chand->method_params_table);
-      gpr_mu_unlock(&chand->mu);
       method_parameters *method_params = grpc_method_config_table_get(
-          exec_ctx, method_params_table, args->path);
+          exec_ctx, method_params_table, calld->path);
       if (method_params != NULL) {
         if (gpr_time_cmp(method_params->timeout,
                          gpr_time_0(GPR_CLOCK_MONOTONIC)) != 0) {
@@ -1121,24 +1101,53 @@ static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
         }
       }
       grpc_slice_hash_table_unref(exec_ctx, method_params_table);
-    } else {
-      gpr_mu_unlock(&chand->mu);
     }
   } else {
     // We don't yet have a resolver result, so register a callback to
     // get the service config data once the resolver returns.
     // Take a reference to the call stack to be owned by the callback.
     GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
-    grpc_closure_init(&calld->read_service_config, read_service_config, elem,
-                      grpc_schedule_on_exec_ctx);
+    grpc_closure_init(&calld->read_service_config, read_service_config_locked,
+                      elem, grpc_combiner_scheduler(chand->combiner, false));
     grpc_closure_list_append(&chand->waiting_for_config_closures,
                              &calld->read_service_config, GRPC_ERROR_NONE);
-    gpr_mu_unlock(&chand->mu);
   }
   // Start the deadline timer with the current deadline value. If we
   // do not yet have service config data, then the timer may be reset
   // later.
   grpc_deadline_state_start(exec_ctx, elem, calld->deadline);
+  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
+                        "initial_read_service_config");
+}
+
+/* Constructor for call_data */
+static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
+                                     grpc_call_element *elem,
+                                     grpc_call_element_args *args) {
+  channel_data *chand = elem->channel_data;
+  call_data *calld = elem->call_data;
+  // Initialize data members.
+  grpc_deadline_state_init(exec_ctx, elem, args->call_stack);
+  calld->path = grpc_slice_ref_internal(args->path);
+  calld->call_start_time = args->start_time;
+  calld->deadline = gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
+  calld->wait_for_ready_from_service_config = WAIT_FOR_READY_UNSET;
+  calld->cancel_error = GRPC_ERROR_NONE;
+  gpr_atm_rel_store(&calld->subchannel_call, 0);
+  calld->connected_subchannel = NULL;
+  calld->waiting_ops = NULL;
+  calld->waiting_ops_count = 0;
+  calld->waiting_ops_capacity = 0;
+  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
+  calld->owning_call = args->call_stack;
+  calld->pollent = NULL;
+  GRPC_CALL_STACK_REF(calld->owning_call, "initial_read_service_config");
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(&calld->read_service_config,
+                        initial_read_service_config_locked, elem,
+                        grpc_combiner_scheduler(chand->combiner, false)),
+      GRPC_ERROR_NONE);
   return GRPC_ERROR_NONE;
 }
@@ -1156,7 +1165,6 @@ static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
     GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, call, "client_channel_destroy_call");
   }
   GPR_ASSERT(calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING);
-  gpr_mu_destroy(&calld->mu);
   GPR_ASSERT(calld->waiting_ops_count == 0);
   if (calld->connected_subchannel != NULL) {
     GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
@@ -1192,26 +1200,36 @@ const grpc_channel_filter grpc_client_channel_filter = {
     "client-channel",
 };
 
+static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                  grpc_error *error_ignored) {
+  channel_data *chand = arg;
+  if (chand->lb_policy != NULL) {
+    grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
+  } else {
+    chand->exit_idle_when_lb_policy_arrives = true;
+    if (!chand->started_resolving && chand->resolver != NULL) {
+      GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
+      chand->started_resolving = true;
+      grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
+                         &chand->on_resolver_result_changed);
+    }
+  }
+  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
+}
+
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
     grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
   channel_data *chand = elem->channel_data;
-  grpc_connectivity_state out;
-  gpr_mu_lock(&chand->mu);
-  out = grpc_connectivity_state_check(&chand->state_tracker, NULL);
+  grpc_connectivity_state out =
+      grpc_connectivity_state_check(&chand->state_tracker);
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
-    if (chand->lb_policy != NULL) {
-      grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
-    } else {
-      chand->exit_idle_when_lb_policy_arrives = true;
-      if (!chand->started_resolving && chand->resolver != NULL) {
-        GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
-        chand->started_resolving = true;
-        grpc_resolver_next(exec_ctx, chand->resolver, &chand->resolver_result,
-                           &chand->on_resolver_result_changed);
-      }
-    }
+    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
+    grpc_closure_sched(
+        exec_ctx,
+        grpc_closure_create(try_to_connect_locked, chand,
+                            grpc_combiner_scheduler(chand->combiner, false)),
+        GRPC_ERROR_NONE);
   }
-  gpr_mu_unlock(&chand->mu);
   return out;
 }
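Same pattern for the public connectivity API: the read becomes a bare
one-argument grpc_connectivity_state_check with no channel lock at all
(presumably relying on the state tracker's own synchronization), and only the
optional connect kick is bounced through try_to_connect_locked on the
combiner, pinned by its own "try_to_connect" channel-stack ref so the channel
cannot be destroyed while that closure is in flight.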
@@ -1219,6 +1237,7 @@ typedef struct {
   channel_data *chand;
   grpc_pollset *pollset;
   grpc_closure *on_complete;
+  grpc_connectivity_state *state;
   grpc_closure my_closure;
 } external_connectivity_watcher;
@@ -1231,7 +1250,16 @@ static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
   GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
                            "external_connectivity_watcher");
   gpr_free(w);
-  follow_up->cb(exec_ctx, follow_up->cb_arg, error);
+  grpc_closure_run(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+}
+
+static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
+                                            grpc_error *error_ignored) {
+  external_connectivity_watcher *w = arg;
+  grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+                    grpc_schedule_on_exec_ctx);
+  grpc_connectivity_state_notify_on_state_change(
+      exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
 }
 
 void grpc_client_channel_watch_connectivity_state(
@@ -1242,13 +1270,13 @@ void grpc_client_channel_watch_connectivity_state(
   w->chand = chand;
   w->pollset = pollset;
   w->on_complete = on_complete;
+  w->state = state;
   grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
-  grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
-                    grpc_schedule_on_exec_ctx);
   GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                          "external_connectivity_watcher");
-  gpr_mu_lock(&chand->mu);
-  grpc_connectivity_state_notify_on_state_change(
-      exec_ctx, &chand->state_tracker, state, &w->my_closure);
-  gpr_mu_unlock(&chand->mu);
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(&w->my_closure, watch_connectivity_state_locked, w,
+                        grpc_combiner_scheduler(chand->combiner, true)),
+      GRPC_ERROR_NONE);
 }