@@ -175,6 +175,10 @@ typedef struct wrapped_rr_closure_arg {
   /* The RR instance related to the closure */
   grpc_lb_policy* rr_policy;
 
+  /* The grpclb instance that created the wrapping. This instance is not owned,
+   * reference counts are untouched. It's used only for logging purposes. */
+  grpc_lb_policy* glb_policy;
+
   /* heap memory to be freed upon closure execution. */
   void* free_when_done;
 } wrapped_rr_closure_arg;
@@ -199,10 +203,11 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
                                       wc_arg->lb_token_mdelem_storage,
                                       GRPC_MDELEM_REF(wc_arg->lb_token));
       } else {
-        gpr_log(GPR_ERROR,
-                "No LB token for connected subchannel pick %p (from RR "
-                "instance %p).",
-                (void*)*wc_arg->target, (void*)wc_arg->rr_policy);
+        gpr_log(
+            GPR_ERROR,
+            "[grpclb %p] No LB token for connected subchannel pick %p (from RR "
+            "instance %p).",
+            wc_arg->glb_policy, *wc_arg->target, wc_arg->rr_policy);
         abort();
       }
       // Pass on client stats via context. Passes ownership of the reference.
@@ -213,7 +218,8 @@ static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
     }
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Unreffing RR %p", (void*)wc_arg->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
   }
@@ -619,8 +625,10 @@ static void update_lb_connectivity_status_locked(
 
   if (grpc_lb_glb_trace.enabled()) {
     gpr_log(
-        GPR_INFO, "Setting grpclb's state to %s from new RR policy %p state.",
-        grpc_connectivity_state_name(rr_state), (void*)glb_policy->rr_policy);
+        GPR_INFO,
+        "[grpclb %p] Setting grpclb's state to %s from new RR policy %p state.",
+        glb_policy, grpc_connectivity_state_name(rr_state),
+        glb_policy->rr_policy);
   }
   grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
                               rr_state_error,
@@ -647,8 +655,8 @@ static bool pick_from_internal_rr_locked(
   if (server->drop) {
     // Not using the RR policy, so unref it.
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")",
-              (intptr_t)wc_arg->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
     // Update client load reporting stats to indicate the number of
@@ -656,6 +664,7 @@ static bool pick_from_internal_rr_locked(
     // the client_load_reporting filter, because we do not create a
     // subchannel call (and therefore no client_load_reporting filter)
     // for dropped calls.
+    GPR_ASSERT(wc_arg->client_stats != NULL);
     grpc_grpclb_client_stats_add_call_dropped_locked(
         server->load_balance_token, wc_arg->client_stats);
     grpc_grpclb_client_stats_unref(wc_arg->client_stats);
@@ -676,8 +685,8 @@ static bool pick_from_internal_rr_locked(
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
-              (intptr_t)wc_arg->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
+              wc_arg->rr_policy);
     }
     GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
     /* add the load reporting initial metadata */
@@ -748,12 +757,13 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
       grpc_lb_policy_create(exec_ctx, "round_robin", args);
   if (new_rr_policy == NULL) {
     gpr_log(GPR_ERROR,
-            "Failure creating a RoundRobin policy for serverlist update with "
-            "%lu entries. The previous RR instance (%p), if any, will continue "
-            "to be used. Future updates from the LB will attempt to create new "
+            "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
+            "update with %" PRIuPTR
+            " entries. The previous RR instance (%p), if any, will continue to "
+            "be used. Future updates from the LB will attempt to create new "
             "instances.",
-            (unsigned long)glb_policy->serverlist->num_servers,
-            (void*)glb_policy->rr_policy);
+            glb_policy, glb_policy->serverlist->num_servers,
+            glb_policy->rr_policy);
     return;
   }
   glb_policy->rr_policy = new_rr_policy;
@@ -797,8 +807,9 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
     pp->wrapped_on_complete_arg.client_stats =
         grpc_grpclb_client_stats_ref(glb_policy->client_stats);
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Pending pick about to (async) PICK from %p",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_INFO,
+              "[grpclb %p] Pending pick about to (async) PICK from RR %p",
+              glb_policy, glb_policy->rr_policy);
     }
     pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
                                  true /* force_async */, pp->target,
@@ -811,8 +822,8 @@ static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
     GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
     pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Pending ping about to PING from 0x%" PRIxPTR "",
-              (intptr_t)glb_policy->rr_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
+              glb_policy, glb_policy->rr_policy);
     }
     grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
                                    &pping->wrapped_notify_arg.wrapper_closure);
@@ -827,15 +838,15 @@ static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
   GPR_ASSERT(args != NULL);
   if (glb_policy->rr_policy != NULL) {
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "Updating Round Robin policy (%p)",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
     grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
   } else {
     create_rr_locked(exec_ctx, glb_policy, args);
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "Created new Round Robin policy (%p)",
-              (void*)glb_policy->rr_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
+              glb_policy->rr_policy);
     }
   }
   lb_policy_args_destroy(exec_ctx, args);
@@ -1177,8 +1188,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
     if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
       if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
-                "grpclb %p NOT picking from from RR %p: RR conn state=%s",
-                (void*)glb_policy, (void*)glb_policy->rr_policy,
+                "[grpclb %p] NOT picking from from RR %p: RR conn state=%s",
+                glb_policy, glb_policy->rr_policy,
                 grpc_connectivity_state_name(rr_connectivity_state));
       }
       add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
@@ -1186,8 +1197,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
       pick_done = false;
     } else {  // RR not in shutdown
       if (grpc_lb_glb_trace.enabled()) {
-        gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
-                (void*)glb_policy, (void*)glb_policy->rr_policy);
+        gpr_log(GPR_INFO, "[grpclb %p] about to PICK from RR %p", glb_policy,
+                glb_policy->rr_policy);
       }
       GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
       wrapped_rr_closure_arg* wc_arg =
@@ -1204,6 +1215,7 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
       wc_arg->lb_token_mdelem_storage = pick_args->lb_token_mdelem_storage;
       wc_arg->initial_metadata = pick_args->initial_metadata;
       wc_arg->free_when_done = wc_arg;
+      wc_arg->glb_policy = pol;
       pick_done =
           pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
                                        false /* force_async */, target, wc_arg);
@@ -1211,9 +1223,8 @@ static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
   } else {  // glb_policy->rr_policy == NULL
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG,
-              "No RR policy in grpclb instance %p. Adding to grpclb's pending "
-              "picks",
-              (void*)(glb_policy));
+              "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
+              glb_policy);
     }
     add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);
@@ -1259,12 +1270,11 @@ static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                           grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->retry_timer_active = false;
-  if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
+  if (!glb_policy->shutting_down && glb_policy->lb_call == NULL &&
+      error == GRPC_ERROR_NONE) {
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
-              (void*)glb_policy);
+      gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
-    GPR_ASSERT(glb_policy->lb_call == NULL);
     query_for_backends_locked(exec_ctx, glb_policy);
   }
   GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
@@ -1284,14 +1294,16 @@ static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
         grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
             .next_attempt_start_time;
     if (grpc_lb_glb_trace.enabled()) {
-      gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
-              (void*)glb_policy);
+      gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
+              glb_policy);
       grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
       if (timeout > 0) {
-        gpr_log(GPR_DEBUG, "... retry_timer_active in %" PRIdPTR "ms.",
-                timeout);
+        gpr_log(GPR_DEBUG,
+                "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
+                glb_policy, timeout);
       } else {
-        gpr_log(GPR_DEBUG, "... retry_timer_active immediately.");
+        gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.",
+                glb_policy);
       }
     }
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
@@ -1392,7 +1404,7 @@ static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
       exec_ctx, glb_policy->lb_call, &op, 1,
       &glb_policy->client_load_report_closure);
   if (call_error != GRPC_CALL_OK) {
-    gpr_log(GPR_ERROR, "call_error=%d", call_error);
+    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
     GPR_ASSERT(GRPC_CALL_OK == call_error);
   }
 }
@@ -1486,9 +1498,8 @@ static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
 
   if (grpc_lb_glb_trace.enabled()) {
     gpr_log(GPR_INFO,
-            "Query for backends (grpclb: %p, lb_channel: %p, lb_call: %p)",
-            (void*)glb_policy, (void*)glb_policy->lb_channel,
-            (void*)glb_policy->lb_call);
+            "[grpclb %p] Query for backends (lb_channel: %p, lb_call: %p)",
+            glb_policy, glb_policy->lb_channel, glb_policy->lb_call);
   }
   GPR_ASSERT(glb_policy->lb_call != NULL);
 
@@ -1578,9 +1589,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
                               &response->client_stats_report_interval));
       if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
-                "received initial LB response message; "
+                "[grpclb %p] Received initial LB response message; "
                 "client load reporting interval = %" PRIdPTR " milliseconds",
-                glb_policy->client_stats_report_interval);
+                glb_policy, glb_policy->client_stats_report_interval);
       }
       /* take a weak ref (won't prevent calling of \a glb_shutdown() if the
        * strong ref count goes to zero) to be unref'd in
@@ -1590,8 +1601,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
       schedule_next_client_load_report(exec_ctx, glb_policy);
     } else if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO,
-              "received initial LB response message; "
-              "client load reporting NOT enabled");
+              "[grpclb %p] Received initial LB response message; client load "
+              "reporting NOT enabled",
+              glb_policy);
     }
     grpc_grpclb_initial_response_destroy(response);
     glb_policy->seen_initial_response = true;
@@ -1601,14 +1613,16 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
     if (serverlist != NULL) {
       GPR_ASSERT(glb_policy->lb_call != NULL);
       if (grpc_lb_glb_trace.enabled()) {
-        gpr_log(GPR_INFO, "Serverlist with %lu servers received",
-                (unsigned long)serverlist->num_servers);
+        gpr_log(GPR_INFO,
+                "[grpclb %p] Serverlist with %" PRIuPTR " servers received",
+                glb_policy, serverlist->num_servers);
         for (size_t i = 0; i < serverlist->num_servers; ++i) {
           grpc_resolved_address addr;
           parse_server(serverlist->servers[i], &addr);
           char* ipport;
           grpc_sockaddr_to_string(&ipport, &addr, false);
-          gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
+          gpr_log(GPR_INFO, "[grpclb %p] Serverlist[%" PRIuPTR "]: %s",
+                  glb_policy, i, ipport);
           gpr_free(ipport);
         }
       }
@@ -1618,7 +1632,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
                                           serverlist)) {
           if (grpc_lb_glb_trace.enabled()) {
             gpr_log(GPR_INFO,
-                    "Incoming server list identical to current, ignoring.");
+                    "[grpclb %p] Incoming server list identical to current, "
+                    "ignoring.",
+                    glb_policy);
           }
           grpc_grpclb_destroy_serverlist(serverlist);
         } else { /* new serverlist */
@@ -1644,12 +1660,16 @@ static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
         }
       } else {
         if (grpc_lb_glb_trace.enabled()) {
-          gpr_log(GPR_INFO, "Received empty server list, ignoring.");
+          gpr_log(GPR_INFO,
+                  "[grpclb %p] Received empty server list, ignoring.",
+                  glb_policy);
         }
         grpc_grpclb_destroy_serverlist(serverlist);
       }
     } else { /* serverlist == NULL */
-      gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
+      gpr_log(GPR_ERROR,
+              "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
+              glb_policy,
              grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
     }
   }
@@ -1689,8 +1709,8 @@ static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
   if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) {
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO,
-              "Falling back to use backends from resolver (grpclb %p)",
-              (void*)glb_policy);
+              "[grpclb %p] Falling back to use backends from resolver",
+              glb_policy);
     }
     GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL);
     rr_handover_locked(exec_ctx, glb_policy);
@@ -1708,10 +1728,10 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
     char* status_details =
         grpc_slice_to_c_string(glb_policy->lb_call_status_details);
     gpr_log(GPR_INFO,
-            "Status from LB server received. Status = %d, Details = '%s', "
-            "(call: %p), error %p",
-            glb_policy->lb_call_status, status_details,
-            (void*)glb_policy->lb_call, (void*)error);
+            "[grpclb %p] Status from LB server received. Status = %d, Details "
+            "= '%s', (call: %p), error '%s'",
+            glb_policy, glb_policy->lb_call_status, status_details,
+            glb_policy->lb_call, grpc_error_string(error));
     gpr_free(status_details);
   }
   /* We need to perform cleanups no matter what. */
@@ -1752,10 +1772,10 @@ static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
           "glb_update_missing");
     } else {
       // otherwise, keep using the current LB channel (ignore this update).
-      gpr_log(GPR_ERROR,
-              "No valid LB addresses channel arg for grpclb %p update, "
-              "ignoring.",
-              (void*)glb_policy);
+      gpr_log(
+          GPR_ERROR,
+          "[grpclb %p] No valid LB addresses channel arg in update, ignoring.",
+          glb_policy);
     }
     return;
   }
@@ -1887,8 +1907,9 @@ static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
   if (grpc_lb_glb_trace.enabled()) {
-    gpr_log(GPR_INFO, "Will use '%s' as the server name for LB request.",
-            glb_policy->server_name);
+    gpr_log(GPR_INFO,
+            "[grpclb %p] Will use '%s' as the server name for LB request.",
+            glb_policy, glb_policy->server_name);
   }
   grpc_uri_destroy(uri);
 
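
The pattern the patch applies is uniform throughout: every grpclb log line gains a "[grpclb %p]" prefix naming the owning policy instance, and closures that outlive the call stack (wrapped_rr_closure_arg) carry a non-owned glb_policy pointer solely so their logs can identify their creator. Below is a minimal standalone sketch of that idea in plain C; the my_policy struct and policy_log helper are hypothetical stand-ins for the real glb_lb_policy and gpr_log, not part of this patch.

#include <stdio.h>

/* Hypothetical stand-in for glb_lb_policy; only its address matters here,
 * mirroring how the patch uses the pointer purely for log attribution. */
typedef struct my_policy {
  int unused;
} my_policy;

/* Prefix each message with the owning instance so interleaved logs from
 * several concurrently active policies remain attributable. */
static void policy_log(const my_policy* p, const char* msg) {
  printf("[grpclb %p] %s\n", (const void*)p, msg);
}

int main(void) {
  my_policy a, b;
  policy_log(&a, "Query for backends");
  policy_log(&b, "Query for backends"); /* distinguishable from instance a */
  return 0;
}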