Add re-resolution into LB policies

pull/12829/head
Juanli Shen 7 years ago
parent 973854a845
commit 592cf34f91

Changed files:
  1. src/core/ext/filters/client_channel/client_channel.cc (235 changed lines)
  2. src/core/ext/filters/client_channel/lb_policy.cc (27 changed lines)
  3. src/core/ext/filters/client_channel/lb_policy.h (19 changed lines)
  4. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (47 changed lines)
  5. src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc (98 changed lines)
  6. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (128 changed lines)
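
Note on the mechanism (not part of the commit; illustrative only): previously the
client channel reacted to an LB policy reporting SHUTDOWN by poking the resolver
and unreffing the policy itself (see the on_lb_policy_state_changed_locked hunk
below). This commit replaces that with an explicit handshake: the channel arms
each newly created LB policy with a one-shot re-resolution closure
(set_reresolve_closure_locked); when the policy runs out of usable subchannels it
parks itself in IDLE and fires the closure via grpc_lb_policy_try_reresolve();
the channel's request_reresolution_locked then re-runs the resolver and hands the
closure back to the policy. The standalone sketch below models only that one-shot
handoff; every name in it is a local stand-in, none of it is gRPC API.

    #include <stdio.h>

    typedef struct policy policy;
    struct policy {
      /* One-shot callback; nullptr while a request is in flight. Plays the
       * role of the new grpc_lb_policy::request_reresolution field. */
      void (*request_reresolution)(policy* p);
    };

    /* Plays the role of grpc_lb_policy_try_reresolve(): consume the closure
     * so that at most one re-resolution request is ever outstanding. */
    static void try_reresolve(policy* p) {
      if (p->request_reresolution != nullptr) {
        void (*cb)(policy*) = p->request_reresolution;
        p->request_reresolution = nullptr;
        printf("policy: requesting re-resolution\n");
        cb(p);
      } else {
        printf("policy: re-resolution already in progress\n");
      }
    }

    /* Plays the role of the channel's request_reresolution_locked(): re-run
     * the resolver, then give the closure back to the policy. */
    static void channel_cb(policy* p) {
      printf("channel: re-running the resolver\n");
      p->request_reresolution = channel_cb; /* re-arm for next time */
    }

    int main() {
      policy p = {channel_cb}; /* armed at policy creation */
      try_reresolve(&p);       /* subchannels exhausted: fires, then re-armed */
      try_reresolve(&p);       /* fires again because the channel re-armed it */
      return 0;
    }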

src/core/ext/filters/client_channel/client_channel.cc

@@ -210,6 +210,14 @@ typedef struct client_channel_channel_data {
   char* info_service_config_json;
 } channel_data;
 
+typedef struct {
+  channel_data* chand;
+  /** used as an identifier, don't dereference it because the LB policy may be
+   * non-existing when the callback is run */
+  grpc_lb_policy* lb_policy;
+  grpc_closure closure;
+} reresolution_request_args;
+
 /** We create one watcher for each new lb_policy that is returned from a
     resolver, to watch for state changes from the lb_policy. When a state
     change is seen, we update the channel, and create a new watcher. */
@@ -258,21 +266,13 @@ static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
 static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
                                               void* arg, grpc_error* error) {
   lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
-  grpc_connectivity_state publish_state = w->state;
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy) {
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
               w->lb_policy, grpc_connectivity_state_name(w->state));
     }
-    if (publish_state == GRPC_CHANNEL_SHUTDOWN &&
-        w->chand->resolver != nullptr) {
-      publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-      grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
-      GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
-      w->chand->lb_policy = nullptr;
-    }
-    set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
+    set_channel_connectivity_state_locked(exec_ctx, w->chand, w->state,
                                           GRPC_ERROR_REF(error), "lb_changed");
     if (w->state != GRPC_CHANNEL_SHUTDOWN) {
       watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
@@ -369,6 +369,27 @@ static void parse_retry_throttle_params(const grpc_json* field, void* arg) {
   }
 }
 
+static void request_reresolution_locked(grpc_exec_ctx* exec_ctx, void* arg,
+                                        grpc_error* error) {
+  reresolution_request_args* args = (reresolution_request_args*)arg;
+  channel_data* chand = args->chand;
+  // If this invocation is for a stale LB policy, treat it as an LB shutdown
+  // signal.
+  if (args->lb_policy != chand->lb_policy || error != GRPC_ERROR_NONE ||
+      chand->resolver == nullptr) {
+    GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "re-resolution");
+    gpr_free(args);
+    return;
+  }
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+  }
+  grpc_resolver_channel_saw_error_locked(exec_ctx, chand->resolver);
+  // Give back the closure to the LB policy.
+  grpc_lb_policy_set_reresolve_closure_locked(exec_ctx, chand->lb_policy,
+                                              &args->closure);
+}
+
 static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
                                               void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
@@ -385,100 +406,114 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
   grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
   grpc_slice_hash_table* method_params_table = nullptr;
   if (chand->resolver_result != nullptr) {
-    // Find LB policy name.
-    const char* lb_policy_name = nullptr;
-    const grpc_arg* channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
-    if (channel_arg != nullptr) {
-      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-      lb_policy_name = channel_arg->value.string;
-    }
-    // Special case: If at least one balancer address is present, we use
-    // the grpclb policy, regardless of what the resolver actually specified.
-    channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
-    if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
-      grpc_lb_addresses* addresses =
-          (grpc_lb_addresses*)channel_arg->value.pointer.p;
-      bool found_balancer_address = false;
-      for (size_t i = 0; i < addresses->num_addresses; ++i) {
-        if (addresses->addresses[i].is_balancer) {
-          found_balancer_address = true;
-          break;
-        }
-      }
-      if (found_balancer_address) {
-        if (lb_policy_name != nullptr &&
-            strcmp(lb_policy_name, "grpclb") != 0) {
-          gpr_log(GPR_INFO,
-                  "resolver requested LB policy %s but provided at least one "
-                  "balancer address -- forcing use of grpclb LB policy",
-                  lb_policy_name);
-        }
-        lb_policy_name = "grpclb";
-      }
-    }
-    // Use pick_first if nothing was specified and we didn't select grpclb
-    // above.
-    if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
-    grpc_lb_policy_args lb_policy_args;
-    lb_policy_args.args = chand->resolver_result;
-    lb_policy_args.client_channel_factory = chand->client_channel_factory;
-    lb_policy_args.combiner = chand->combiner;
-    // Check to see if we're already using the right LB policy.
-    // Note: It's safe to use chand->info_lb_policy_name here without
-    // taking a lock on chand->info_mu, because this function is the
-    // only thing that modifies its value, and it can only be invoked
-    // once at any given time.
-    lb_policy_name_changed =
-        chand->info_lb_policy_name == nullptr ||
-        gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
-    if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
-      // Continue using the same LB policy. Update with new addresses.
-      lb_policy_updated = true;
-      grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
-    } else {
-      // Instantiate new LB policy.
-      new_lb_policy =
-          grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
-      if (new_lb_policy == nullptr) {
-        gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
-      }
-    }
-    // Find service config.
-    channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
-    if (channel_arg != nullptr) {
-      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-      service_config_json = gpr_strdup(channel_arg->value.string);
-      grpc_service_config* service_config =
-          grpc_service_config_create(service_config_json);
-      if (service_config != nullptr) {
-        channel_arg =
-            grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
-        GPR_ASSERT(channel_arg != nullptr);
-        GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-        grpc_uri* uri =
-            grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
-        GPR_ASSERT(uri->path[0] != '\0');
-        service_config_parsing_state parsing_state;
-        memset(&parsing_state, 0, sizeof(parsing_state));
-        parsing_state.server_name =
-            uri->path[0] == '/' ? uri->path + 1 : uri->path;
-        grpc_service_config_parse_global_params(
-            service_config, parse_retry_throttle_params, &parsing_state);
-        grpc_uri_destroy(uri);
-        retry_throttle_data = parsing_state.retry_throttle_data;
-        method_params_table = grpc_service_config_create_method_config_table(
-            exec_ctx, service_config, method_parameters_create_from_json,
-            method_parameters_ref_wrapper, method_parameters_unref_wrapper);
-        grpc_service_config_destroy(service_config);
-      }
-    }
-    // Before we clean up, save a copy of lb_policy_name, since it might
-    // be pointing to data inside chand->resolver_result.
-    // The copy will be saved in chand->lb_policy_name below.
-    lb_policy_name_dup = gpr_strdup(lb_policy_name);
+    if (chand->resolver != nullptr) {
+      // Find LB policy name.
+      const char* lb_policy_name = nullptr;
+      const grpc_arg* channel_arg = grpc_channel_args_find(
+          chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
+      if (channel_arg != nullptr) {
+        GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+        lb_policy_name = channel_arg->value.string;
+      }
+      // Special case: If at least one balancer address is present, we use
+      // the grpclb policy, regardless of what the resolver actually specified.
+      channel_arg =
+          grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
+      if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+        grpc_lb_addresses* addresses =
+            (grpc_lb_addresses*)channel_arg->value.pointer.p;
+        bool found_balancer_address = false;
+        for (size_t i = 0; i < addresses->num_addresses; ++i) {
+          if (addresses->addresses[i].is_balancer) {
+            found_balancer_address = true;
+            break;
+          }
+        }
+        if (found_balancer_address) {
+          if (lb_policy_name != nullptr &&
+              strcmp(lb_policy_name, "grpclb") != 0) {
+            gpr_log(GPR_INFO,
                    "resolver requested LB policy %s but provided at least one "
+                    "balancer address -- forcing use of grpclb LB policy",
+                    lb_policy_name);
+          }
+          lb_policy_name = "grpclb";
+        }
+      }
+      // Use pick_first if nothing was specified and we didn't select grpclb
+      // above.
+      if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
+      grpc_lb_policy_args lb_policy_args;
+      lb_policy_args.args = chand->resolver_result;
+      lb_policy_args.client_channel_factory = chand->client_channel_factory;
+      lb_policy_args.combiner = chand->combiner;
+      // Check to see if we're already using the right LB policy.
+      // Note: It's safe to use chand->info_lb_policy_name here without
+      // taking a lock on chand->info_mu, because this function is the
+      // only thing that modifies its value, and it can only be invoked
+      // once at any given time.
+      lb_policy_name_changed =
+          chand->info_lb_policy_name == nullptr ||
+          gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
+      if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
+        // Continue using the same LB policy. Update with new addresses.
+        lb_policy_updated = true;
+        grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy,
+                                     &lb_policy_args);
+      } else {
+        // Instantiate new LB policy.
+        new_lb_policy =
+            grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
+        if (new_lb_policy == nullptr) {
+          gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
+                  lb_policy_name);
+        } else {
+          reresolution_request_args* args =
+              (reresolution_request_args*)gpr_zalloc(sizeof(*args));
+          args->chand = chand;
+          args->lb_policy = new_lb_policy;
+          GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
+                            grpc_combiner_scheduler(chand->combiner));
+          GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
+          grpc_lb_policy_set_reresolve_closure_locked(exec_ctx, new_lb_policy,
+                                                      &args->closure);
+        }
+      }
+      // Find service config.
+      channel_arg = grpc_channel_args_find(chand->resolver_result,
+                                           GRPC_ARG_SERVICE_CONFIG);
+      if (channel_arg != nullptr) {
+        GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+        service_config_json = gpr_strdup(channel_arg->value.string);
+        grpc_service_config* service_config =
+            grpc_service_config_create(service_config_json);
+        if (service_config != nullptr) {
+          channel_arg = grpc_channel_args_find(chand->resolver_result,
+                                               GRPC_ARG_SERVER_URI);
+          GPR_ASSERT(channel_arg != nullptr);
+          GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+          grpc_uri* uri =
+              grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
+          GPR_ASSERT(uri->path[0] != '\0');
+          service_config_parsing_state parsing_state;
+          memset(&parsing_state, 0, sizeof(parsing_state));
+          parsing_state.server_name =
+              uri->path[0] == '/' ? uri->path + 1 : uri->path;
+          grpc_service_config_parse_global_params(
+              service_config, parse_retry_throttle_params, &parsing_state);
+          grpc_uri_destroy(uri);
+          retry_throttle_data = parsing_state.retry_throttle_data;
+          method_params_table = grpc_service_config_create_method_config_table(
+              exec_ctx, service_config, method_parameters_create_from_json,
+              method_parameters_ref_wrapper, method_parameters_unref_wrapper);
+          grpc_service_config_destroy(service_config);
+        }
+      }
+      // Before we clean up, save a copy of lb_policy_name, since it might
+      // be pointing to data inside chand->resolver_result.
+      // The copy will be saved in chand->lb_policy_name below.
+      lb_policy_name_dup = gpr_strdup(lb_policy_name);
+    }
     grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
     chand->resolver_result = nullptr;
   }
@@ -515,11 +550,11 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
   }
   chand->method_params_table = method_params_table;
   // If we have a new LB policy or are shutting down (in which case
-  // new_lb_policy will be NULL), swap out the LB policy, unreffing the
-  // old one and removing its fds from chand->interested_parties.
-  // Note that we do NOT do this if either (a) we updated the existing
-  // LB policy above or (b) we failed to create the new LB policy (in
-  // which case we want to continue using the most recent one we had).
+  // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
+  // and removing its fds from chand->interested_parties. Note that we do NOT do
+  // this if either (a) we updated the existing LB policy above or (b) we failed
+  // to create the new LB policy (in which case we want to continue using the
+  // most recent one we had).
   if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
       chand->resolver == nullptr) {
     if (chand->lb_policy != nullptr) {

src/core/ext/filters/client_channel/lb_policy.cc

@@ -161,3 +161,30 @@ void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
                                   const grpc_lb_policy_args* lb_policy_args) {
   policy->vtable->update_locked(exec_ctx, policy, lb_policy_args);
 }
+
+void grpc_lb_policy_set_reresolve_closure_locked(
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_closure* request_reresolution) {
+  policy->vtable->set_reresolve_closure_locked(exec_ctx, policy,
+                                               request_reresolution);
+}
+
+void grpc_lb_policy_try_reresolve(grpc_exec_ctx* exec_ctx,
+                                  grpc_lb_policy* policy,
+                                  grpc_core::TraceFlag* grpc_lb_trace,
+                                  grpc_error* error) {
+  if (policy->request_reresolution != nullptr) {
+    GRPC_CLOSURE_SCHED(exec_ctx, policy->request_reresolution, error);
+    policy->request_reresolution = nullptr;
+    if (grpc_lb_trace->enabled()) {
+      gpr_log(GPR_DEBUG,
+              "%s %p: scheduling re-resolution closure with error=%s.",
+              grpc_lb_trace->name(), policy, grpc_error_string(error));
+    }
+  } else {
+    if (grpc_lb_trace->enabled() && error == GRPC_ERROR_NONE) {
+      gpr_log(GPR_DEBUG, "%s %p: re-resolution already in progress.",
+              grpc_lb_trace->name(), policy);
+    }
+  }
+}
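
Taken together, these two helpers define the pattern every policy below follows:
store the channel's closure in the set_reresolve_closure_locked vtable hook, and
call grpc_lb_policy_try_reresolve() either when the subchannel set is exhausted
(with GRPC_ERROR_NONE) or on shutdown (with GRPC_ERROR_CANCELLED, which the
channel's request_reresolution_locked above treats as a drop signal). A condensed
sketch of that pattern for a hypothetical my_lb_policy (all my_* names are
placeholders; the real implementations are in the pick_first and round_robin
hunks below):

    static void my_set_reresolve_closure_locked(
        grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
        grpc_closure* request_reresolution) {
      my_lb_policy* p = (my_lb_policy*)policy;
      GPR_ASSERT(!p->shutdown);
      GPR_ASSERT(policy->request_reresolution == nullptr);
      policy->request_reresolution = request_reresolution;
    }

    static void my_on_subchannels_exhausted_locked(grpc_exec_ctx* exec_ctx,
                                                   my_lb_policy* p) {
      // Park in IDLE, stop picking, and ask the channel for fresh addresses.
      grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
                                  GRPC_ERROR_NONE, "exhausted+reresolve");
      p->started_picking = false;
      grpc_lb_policy_try_reresolve(exec_ctx, &p->base, &my_lb_trace,
                                   GRPC_ERROR_NONE);
    }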

src/core/ext/filters/client_channel/lb_policy.h

@@ -38,6 +38,8 @@ struct grpc_lb_policy {
   grpc_pollset_set* interested_parties;
   /* combiner under which lb_policy actions take place */
   grpc_combiner* combiner;
+  /* callback to force a re-resolution */
+  grpc_closure* request_reresolution;
 };
 
 /** Extra arguments for an LB pick */

@@ -96,6 +98,11 @@ struct grpc_lb_policy_vtable {
   void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
                         const grpc_lb_policy_args* args);
+
+  /** \see grpc_lb_policy_set_reresolve_closure */
+  void (*set_reresolve_closure_locked)(grpc_exec_ctx* exec_ctx,
+                                       grpc_lb_policy* policy,
+                                       grpc_closure* request_reresolution);
 };
 
 #ifndef NDEBUG

@@ -202,4 +209,16 @@ void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
                                   grpc_lb_policy* policy,
                                   const grpc_lb_policy_args* lb_policy_args);
 
+/** Set the re-resolution closure to \a request_reresolution. */
+void grpc_lb_policy_set_reresolve_closure_locked(
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_closure* request_reresolution);
+
+/** Try to request a re-resolution. It's NOT a public API; it's only for use by
+    the LB policy implementations. */
+void grpc_lb_policy_try_reresolve(grpc_exec_ctx* exec_ctx,
+                                  grpc_lb_policy* policy,
+                                  grpc_core::TraceFlag* grpc_lb_trace,
+                                  grpc_error* error);
+
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
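
Since set_reresolve_closure_locked is a new mandatory vtable entry, every policy
must add one more member to its static vtable initializer (the grpclb,
pick_first, and round_robin hunks below all do); a policy that omitted it would
leave a null function pointer behind grpc_lb_policy_set_reresolve_closure_locked.
A sketch for a hypothetical my_lb_policy, with the member order used by the three
policies in this commit:

    static const grpc_lb_policy_vtable my_lb_policy_vtable = {
        my_destroy,
        my_shutdown_locked,
        my_pick_locked,
        my_cancel_pick_locked,
        my_cancel_picks_locked,
        my_ping_one_locked,
        my_exit_idle_locked,
        my_check_connectivity_locked,
        my_notify_on_state_change_locked,
        my_update_locked,
        my_set_reresolve_closure_locked};  // the new, final entry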

src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc

@@ -637,7 +637,7 @@ static void update_lb_connectivity_status_locked(
 /* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
  * immediately (ignoring its completion callback), we need to perform the
- * cleanups this callback would otherwise be resposible for.
+ * cleanups this callback would otherwise be responsible for.
  * If \a force_async is true, then we will manually schedule the
  * completion callback even if the pick is available immediately. */
 static bool pick_from_internal_rr_locked(
@@ -766,6 +766,9 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
             glb_policy->rr_policy);
     return;
   }
+  grpc_lb_policy_set_reresolve_closure_locked(
+      exec_ctx, new_rr_policy, glb_policy->base.request_reresolution);
+  glb_policy->base.request_reresolution = nullptr;
   glb_policy->rr_policy = new_rr_policy;
   grpc_error* rr_state_error = nullptr;
   const grpc_connectivity_state rr_state =
@@ -991,6 +994,7 @@ static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
 static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   glb_policy->shutting_down = true;
 
   /* We need a copy of the lb_call pointer because we can't cancell the call
@@ -1021,6 +1025,9 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   glb_policy->pending_pings = nullptr;
   if (glb_policy->rr_policy != nullptr) {
     GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
+  } else {
+    grpc_lb_policy_try_reresolve(exec_ctx, pol, &grpc_lb_glb_trace,
+                                 GRPC_ERROR_CANCELLED);
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -1030,28 +1037,27 @@ static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
     grpc_channel_destroy(glb_policy->lb_channel);
     glb_policy->lb_channel = nullptr;
   }
-  grpc_connectivity_state_set(
-      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
+  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
+                              GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+                              "glb_shutdown");
 
   while (pp != nullptr) {
     pending_pick* next = pp->next;
     *pp->target = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+                       GRPC_ERROR_REF(error));
     gpr_free(pp);
     pp = next;
   }
 
   while (pping != nullptr) {
     pending_ping* next = pping->next;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+                       GRPC_ERROR_REF(error));
     gpr_free(pping);
     pping = next;
   }
+  GRPC_ERROR_UNREF(error);
 }
 
 // Cancel a specific pending pick.
@@ -1754,8 +1760,8 @@ static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
   grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
   glb_policy->fallback_backend_addresses =
       extract_backend_addresses_locked(exec_ctx, addresses);
-  if (glb_policy->started_picking && glb_policy->lb_fallback_timeout_ms > 0 &&
-      !glb_policy->fallback_timer_active) {
+  if (glb_policy->lb_fallback_timeout_ms > 0 &&
+      glb_policy->rr_policy != nullptr) {
     rr_handover_locked(exec_ctx, glb_policy);
   }
 }
@@ -1870,6 +1876,20 @@ static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
   }
 }
 
+static void glb_set_reresolve_closure_locked(
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_closure* request_reresolution) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+  GPR_ASSERT(!glb_policy->shutting_down);
+  GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
+  if (glb_policy->rr_policy != nullptr) {
+    grpc_lb_policy_set_reresolve_closure_locked(exec_ctx, glb_policy->rr_policy,
+                                                request_reresolution);
+  } else {
+    glb_policy->base.request_reresolution = request_reresolution;
+  }
+}
+
 /* Code wiring the policy with the rest of the core */
 static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_destroy,
@@ -1881,7 +1901,8 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_exit_idle_locked,
     glb_check_connectivity_locked,
     glb_notify_on_state_change_locked,
-    glb_update_locked};
+    glb_update_locked,
+    glb_set_reresolve_closure_locked};
 
 static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
                                   grpc_lb_policy_factory* factory,

src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc

@@ -70,8 +70,9 @@ static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   }
 }
 
-static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
-                            grpc_error* error) {
+static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+  pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   if (grpc_lb_pick_first_trace.enabled()) {
     gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
   }
@@ -96,14 +97,11 @@ static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
         exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
     p->latest_pending_subchannel_list = nullptr;
   }
+  grpc_lb_policy_try_reresolve(exec_ctx, &p->base, &grpc_lb_pick_first_trace,
+                               GRPC_ERROR_CANCELLED);
   GRPC_ERROR_UNREF(error);
 }
 
-static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
-  shutdown_locked(exec_ctx, (pick_first_lb_policy*)pol,
-                  GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
-}
-
 static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
@@ -157,10 +155,15 @@ static void start_picking_locked(grpc_exec_ctx* exec_ctx,
   if (p->subchannel_list != nullptr &&
       p->subchannel_list->num_subchannels > 0) {
     p->subchannel_list->checking_subchannel = 0;
-    grpc_lb_subchannel_list_ref_for_connectivity_watch(
-        p->subchannel_list, "connectivity_watch+start_picking");
-    grpc_lb_subchannel_data_start_connectivity_watch(
-        exec_ctx, &p->subchannel_list->subchannels[0]);
+    for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
+      if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
+        grpc_lb_subchannel_list_ref_for_connectivity_watch(
+            p->subchannel_list, "connectivity_watch+start_picking");
+        grpc_lb_subchannel_data_start_connectivity_watch(
+            exec_ctx, &p->subchannel_list->subchannels[i]);
+        break;
+      }
+    }
   }
 }
@@ -404,6 +407,9 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
     if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
         p->latest_pending_subchannel_list != nullptr) {
       p->selected = nullptr;
+      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_list_unref_for_connectivity_watch(
+          exec_ctx, sd->subchannel_list, "selected_not_ready+switch_to_update");
       grpc_lb_subchannel_list_shutdown_and_unref(
           exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
       p->subchannel_list = p->latest_pending_subchannel_list;
@@ -412,21 +418,35 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
           exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
     } else {
-      if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
-        /* if the selected channel goes bad, we're done */
-        sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
+      // TODO(juanlishen): we re-resolve when the selected subchannel goes to
+      // TRANSIENT_FAILURE because we used to shut down in this case before
+      // re-resolution is introduced. But we need to investigate whether we
+      // really want to take any action instead of waiting for the selected
+      // subchannel reconnecting.
+      if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN ||
+          sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+        // If the selected channel goes bad, request a re-resolution.
+        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+                                    GRPC_CHANNEL_IDLE, GRPC_ERROR_NONE,
+                                    "selected_changed+reresolve");
+        p->started_picking = false;
+        grpc_lb_policy_try_reresolve(
+            exec_ctx, &p->base, &grpc_lb_pick_first_trace, GRPC_ERROR_NONE);
+      } else {
+        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+                                    sd->curr_connectivity_state,
+                                    GRPC_ERROR_REF(error), "selected_changed");
       }
-      grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                  sd->curr_connectivity_state,
-                                  GRPC_ERROR_REF(error), "selected_changed");
       if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
         // Renew notification.
         grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
       } else {
+        p->selected = nullptr;
         grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
         grpc_lb_subchannel_list_unref_for_connectivity_watch(
             exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
-        shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
+        grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+                                                 "pf_selected_shutdown");
       }
     }
     return;
@@ -531,24 +551,37 @@ static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
       } while (sd->subchannel == nullptr && sd != original_sd);
       if (sd == original_sd) {
         grpc_lb_subchannel_list_unref_for_connectivity_watch(
-            exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
-        shutdown_locked(exec_ctx, p,
-                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                            "Pick first exhausted channels", &error, 1));
-        break;
-      }
-      if (sd->subchannel_list == p->subchannel_list) {
-        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                    GRPC_CHANNEL_TRANSIENT_FAILURE,
-                                    GRPC_ERROR_REF(error), "subchannel_failed");
-      }
-      // Reuses the connectivity refs from the previous watch.
-      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-      break;
+            exec_ctx, sd->subchannel_list, "pf_exhausted_subchannels");
+        if (sd->subchannel_list == p->subchannel_list) {
+          grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+                                      GRPC_CHANNEL_IDLE, GRPC_ERROR_NONE,
+                                      "exhausted_subchannels+reresolve");
+          p->started_picking = false;
+          grpc_lb_policy_try_reresolve(
+              exec_ctx, &p->base, &grpc_lb_pick_first_trace, GRPC_ERROR_NONE);
+        }
+      } else {
+        if (sd->subchannel_list == p->subchannel_list) {
+          grpc_connectivity_state_set(
+              exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+              GRPC_ERROR_REF(error), "subchannel_failed");
+        }
+        // Reuses the connectivity refs from the previous watch.
+        grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      }
     }
   }
 }
 
+static void pf_set_reresolve_closure_locked(
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_closure* request_reresolution) {
+  pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+  GPR_ASSERT(!p->shutdown);
+  GPR_ASSERT(policy->request_reresolution == nullptr);
+  policy->request_reresolution = request_reresolution;
+}
+
 static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
     pf_destroy,
     pf_shutdown_locked,
@@ -559,7 +592,8 @@ static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
     pf_exit_idle_locked,
     pf_check_connectivity_locked,
     pf_notify_on_state_change_locked,
-    pf_update_locked};
+    pf_update_locked,
+    pf_set_reresolve_closure_locked};
 
 static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
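
Note the pairing in both re-resolution sites above: the policy resets
p->started_picking and parks in IDLE instead of going SHUTDOWN, so a later pick
(or an explicit exit-idle) can restart connecting once the resolver returns new
addresses. pf_exit_idle_locked itself is untouched by this commit; the sketch
below is only a rough approximation of the surrounding file (not shown in this
diff) to illustrate how start_picking_locked is re-entered:

    /* Rough sketch of the existing exit-idle path, not part of this diff. */
    static void pf_exit_idle_locked(grpc_exec_ctx* exec_ctx,
                                    grpc_lb_policy* pol) {
      pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
      if (!p->started_picking) {
        start_picking_locked(exec_ctx, p);
      }
    }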

src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc

@@ -20,9 +20,9 @@
  *
  * Before every pick, the \a get_next_ready_subchannel_index_locked function
  * returns the p->subchannel_list->subchannels index for next subchannel,
- * respecting the relative
- * order of the addresses provided upon creation or updates. Note however that
- * updates will start picking from the beginning of the updated list. */
+ * respecting the relative order of the addresses provided upon creation or
+ * updates. Note however that updates will start picking from the beginning of
+ * the updated list. */
 
 #include <string.h>
@@ -167,8 +167,9 @@ static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
   gpr_free(p);
 }
 
-static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
-                            grpc_error* error) {
+static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+  round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
   }
@@ -194,15 +195,11 @@ static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
                                            "sl_shutdown_pending_rr_shutdown");
     p->latest_pending_subchannel_list = nullptr;
   }
+  grpc_lb_policy_try_reresolve(exec_ctx, &p->base, &grpc_lb_round_robin_trace,
+                               GRPC_ERROR_CANCELLED);
   GRPC_ERROR_UNREF(error);
 }
 
-static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
-  round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
-  shutdown_locked(exec_ctx, p,
-                  GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
-}
-
 static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
@@ -255,10 +252,12 @@ static void start_picking_locked(grpc_exec_ctx* exec_ctx,
                                  round_robin_lb_policy* p) {
   p->started_picking = true;
   for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
-    grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
-                                                       "connectivity_watch");
-    grpc_lb_subchannel_data_start_connectivity_watch(
-        exec_ctx, &p->subchannel_list->subchannels[i]);
+    if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
+      grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
+                                                         "connectivity_watch");
+      grpc_lb_subchannel_data_start_connectivity_watch(
+          exec_ctx, &p->subchannel_list->subchannels[i]);
+    }
   }
 }
@@ -346,71 +345,71 @@ static void update_state_counters_locked(grpc_lb_subchannel_data* sd) {
 }
 
 /** Sets the policy's connectivity status based on that of the passed-in \a sd
- * (the grpc_lb_subchannel_data associted with the updated subchannel) and the
- * subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
- * used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
- * connectivity status set. */
-static grpc_connectivity_state update_lb_connectivity_status_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, grpc_error* error) {
+ * (the grpc_lb_subchannel_data associated with the updated subchannel) and the
+ * subchannel list \a sd belongs to (sd->subchannel_list). \a error will be used
+ * only if the policy transitions to state TRANSIENT_FAILURE. */
+static void update_lb_connectivity_status_locked(grpc_exec_ctx* exec_ctx,
+                                                 grpc_lb_subchannel_data* sd,
+                                                 grpc_error* error) {
   /* In priority order. The first rule to match terminates the search (ie, if we
    * are on rule n, all previous rules were unfulfilled).
    *
    * 1) RULE: ANY subchannel is READY => policy is READY.
-   *    CHECK: At least one subchannel is ready iff p->ready_list is NOT empty.
+   *    CHECK: subchannel_list->num_ready > 0.
    *
    * 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING.
    *    CHECK: sd->curr_connectivity_state == CONNECTING.
    *
-   * 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
-   *    CHECK: p->subchannel_list->num_shutdown ==
-   *           p->subchannel_list->num_subchannels.
+   * 3) RULE: ALL subchannels are SHUTDOWN => policy is IDLE (and requests
+   *          re-resolution).
+   *    CHECK: subchannel_list->num_shutdown ==
+   *           subchannel_list->num_subchannels.
    *
    * 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
    *          TRANSIENT_FAILURE.
-   *    CHECK: p->num_transient_failures == p->subchannel_list->num_subchannels.
+   *    CHECK: subchannel_list->num_transient_failures ==
+   *           subchannel_list->num_subchannels.
    *
    * 5) RULE: ALL subchannels are IDLE => policy is IDLE.
-   *    CHECK: p->num_idle == p->subchannel_list->num_subchannels.
+   *    CHECK: subchannel_list->num_idle == subchannel_list->num_subchannels.
+   *    (Note that all the subchannels will transition from IDLE to CONNECTING
+   *    in batch when we start trying to connect.)
    */
-  grpc_connectivity_state new_state = sd->curr_connectivity_state;
+  // TODO(juanlishen): if the subchannel states are mixed by {SHUTDOWN,
+  // TRANSIENT_FAILURE}, we don't change the state. We may want to improve on
+  // this.
   grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
   round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
-  if (subchannel_list->num_ready > 0) { /* 1) READY */
+  if (subchannel_list->num_ready > 0) {
+    /* 1) READY */
     grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
                                 GRPC_ERROR_NONE, "rr_ready");
-    new_state = GRPC_CHANNEL_READY;
-  } else if (sd->curr_connectivity_state ==
-             GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
+  } else if (sd->curr_connectivity_state == GRPC_CHANNEL_CONNECTING) {
+    /* 2) CONNECTING */
     grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                 GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
                                 "rr_connecting");
-    new_state = GRPC_CHANNEL_CONNECTING;
-  } else if (p->subchannel_list->num_shutdown ==
-             p->subchannel_list->num_subchannels) { /* 3) SHUTDOWN */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
-                                "rr_shutdown");
-    p->shutdown = true;
-    new_state = GRPC_CHANNEL_SHUTDOWN;
-    if (grpc_lb_round_robin_trace.enabled()) {
-      gpr_log(GPR_INFO,
-              "[RR %p] Shutting down: all subchannels have gone into shutdown",
-              (void*)p);
-    }
+  } else if (subchannel_list->num_shutdown ==
+             subchannel_list->num_subchannels) {
+    /* 3) IDLE and re-resolve */
+    grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
+                                GRPC_ERROR_NONE,
+                                "rr_exhausted_subchannels+reresolve");
+    p->started_picking = false;
+    grpc_lb_policy_try_reresolve(exec_ctx, &p->base, &grpc_lb_round_robin_trace,
+                                 GRPC_ERROR_NONE);
   } else if (subchannel_list->num_transient_failures ==
-             p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
+             subchannel_list->num_subchannels) {
+    /* 4) TRANSIENT_FAILURE */
     grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                 GRPC_CHANNEL_TRANSIENT_FAILURE,
                                 GRPC_ERROR_REF(error), "rr_transient_failure");
-    new_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-  } else if (subchannel_list->num_idle ==
-             p->subchannel_list->num_subchannels) { /* 5) IDLE */
+  } else if (subchannel_list->num_idle == subchannel_list->num_subchannels) {
+    /* 5) IDLE */
     grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
                                 GRPC_ERROR_NONE, "rr_idle");
-    new_state = GRPC_CHANNEL_IDLE;
   }
   GRPC_ERROR_UNREF(error);
-  return new_state;
 }
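
To make the rule priority above concrete: with three subchannels where two are in
TRANSIENT_FAILURE and the updated one reports CONNECTING, rule 2 wins over rule
4, so the policy stays CONNECTING. The standalone model below re-implements the
five rules over the counters for illustration only (the enum and structs are
local stand-ins, not gRPC types); note that rule 3 now yields IDLE plus a
re-resolution request where it used to yield SHUTDOWN.

    #include <stddef.h>
    #include <stdio.h>

    typedef enum { IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN } state;

    typedef struct {
      size_t num_subchannels;
      size_t num_ready;
      size_t num_shutdown;
      size_t num_transient_failures;
      size_t num_idle;
    } counters;

    static state aggregate(const counters* c, state updated_subchannel_state,
                           state prev_policy_state) {
      if (c->num_ready > 0) return READY;                            /* rule 1 */
      if (updated_subchannel_state == CONNECTING) return CONNECTING; /* rule 2 */
      if (c->num_shutdown == c->num_subchannels) return IDLE;        /* rule 3 */
      if (c->num_transient_failures == c->num_subchannels)
        return TRANSIENT_FAILURE;                                    /* rule 4 */
      if (c->num_idle == c->num_subchannels) return IDLE;            /* rule 5 */
      return prev_policy_state; /* mixed SHUTDOWN/TRANSIENT_FAILURE: no change */
    }

    int main() {
      counters c = {3 /* subchannels */, 0 /* ready */, 0 /* shutdown */,
                    2 /* transient failures */, 0 /* idle */};
      printf("policy state = %d\n", aggregate(&c, CONNECTING, CONNECTING));
      return 0;
    }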
@@ -454,21 +453,16 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
   // state (which was set by the connectivity state watcher) to
   // curr_connectivity_state, which is what we use inside of the combiner.
   sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
-  // Update state counters and determine new overall state.
+  // Update state counters and new overall state.
   update_state_counters_locked(sd);
-  const grpc_connectivity_state new_policy_connectivity_state =
-      update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
-  // If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
-  // policy's state is SHUTDOWN, clean up.
+  update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
+  // If the sd's new state is SHUTDOWN, unref the subchannel.
   if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
     grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
     grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
                                              "rr_connectivity_shutdown");
     grpc_lb_subchannel_list_unref_for_connectivity_watch(
         exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
-    if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
-      shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
-    }
   } else {  // sd not in SHUTDOWN
     if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
       if (sd->connected_subchannel == nullptr) {
@@ -504,7 +498,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
       }
       /* at this point we know there's at least one suitable subchannel. Go
        * ahead and pick one and notify the pending suitors in
-       * p->pending_picks. This preemtively replicates rr_pick()'s actions. */
+       * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
       const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
       GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
       grpc_lb_subchannel_data* selected =
@@ -642,6 +636,15 @@ static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
   }
 }
 
+static void rr_set_reresolve_closure_locked(
+    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+    grpc_closure* request_reresolution) {
+  round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+  GPR_ASSERT(!p->shutdown);
+  GPR_ASSERT(policy->request_reresolution == nullptr);
+  policy->request_reresolution = request_reresolution;
+}
+
 static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
     rr_destroy,
     rr_shutdown_locked,
@@ -652,7 +655,8 @@ static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
    rr_exit_idle_locked,
    rr_check_connectivity_locked,
    rr_notify_on_state_change_locked,
-    rr_update_locked};
+    rr_update_locked,
+    rr_set_reresolve_closure_locked};
 
 static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
