Extract lb_calld from glb_policy

Branch: pull/13911/head
Author: Juanli Shen, 7 years ago
Parent: 633add8161
Commit: 8e4c9d308c
Changed files:
  1. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (64 lines changed)
  2. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (865 lines changed)
  3. test/cpp/end2end/grpclb_end2end_test.cc (47 lines changed)

src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc

@@ -69,11 +69,13 @@ static grpc_error* init_call_elem(grpc_call_element* elem,
   call_data* calld = (call_data*)elem->call_data;
   // Get stats object from context and take a ref.
   GPR_ASSERT(args->context != nullptr);
-  GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr);
-  calld->client_stats = grpc_grpclb_client_stats_ref(
-      (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
-  // Record call started.
-  grpc_grpclb_client_stats_add_call_started(calld->client_stats);
+  if (args->context[GRPC_GRPCLB_CLIENT_STATS].value != nullptr) {
+    calld->client_stats = grpc_grpclb_client_stats_ref(
+        (grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS]
+            .value);
+    // Record call started.
+    grpc_grpclb_client_stats_add_call_started(calld->client_stats);
+  }
   return GRPC_ERROR_NONE;
 }
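This first hunk makes the GRPC_GRPCLB_CLIENT_STATS context entry optional: init_call_elem no longer asserts that a stats object exists, and the filter takes a ref and records the call start only when one was actually attached to the call. Below is a minimal standalone sketch of that guarded ref-and-record pattern; ClientStats and CallData here are hypothetical stand-ins for grpc_grpclb_client_stats and the filter's call_data, not gRPC API.

    #include <atomic>
    #include <cstdio>

    // Hypothetical stand-in for grpc_grpclb_client_stats: an intrusively
    // ref-counted stats object that may or may not be attached to a call.
    struct ClientStats {
      std::atomic<int> refs{1};
      std::atomic<int> calls_started{0};
    };

    ClientStats* client_stats_ref(ClientStats* s) {
      s->refs.fetch_add(1, std::memory_order_relaxed);
      return s;
    }

    void client_stats_unref(ClientStats* s) {
      if (s->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete s;
    }

    struct CallData {
      ClientStats* client_stats = nullptr;
    };

    // Mirrors the guarded init_call_elem: only ref and record when the call
    // context actually carries a stats object.
    void init_call(CallData* calld, ClientStats* context_value) {
      if (context_value != nullptr) {
        calld->client_stats = client_stats_ref(context_value);
        calld->client_stats->calls_started.fetch_add(
            1, std::memory_order_relaxed);
      }
    }

    // Mirrors the guarded destroy_call_elem: unref only what was refed.
    void destroy_call(CallData* calld) {
      if (calld->client_stats != nullptr) {
        client_stats_unref(calld->client_stats);
      }
    }

    int main() {
      ClientStats* stats = new ClientStats;
      CallData with_stats, without_stats;
      init_call(&with_stats, stats);       // stats attached: refed + recorded
      init_call(&without_stats, nullptr);  // stats absent: no-op, no crash
      destroy_call(&with_stats);
      destroy_call(&without_stats);
      std::printf("calls_started=%d\n", stats->calls_started.load());  // 1
      client_stats_unref(stats);  // drop the owning ref
    }

The point of pairing the guard in init with a matching null check in destroy is that the ref count stays balanced on both paths, which is exactly what the next hunk preserves.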
@@ -81,36 +83,40 @@ static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
-  // Record call finished, optionally setting client_failed_to_send and
-  // received.
-  grpc_grpclb_client_stats_add_call_finished(
-      !calld->send_initial_metadata_succeeded /* client_failed_to_send */,
-      calld->recv_initial_metadata_succeeded /* known_received */,
-      calld->client_stats);
-  // All done, so unref the stats object.
-  grpc_grpclb_client_stats_unref(calld->client_stats);
+  if (calld->client_stats != nullptr) {
+    // Record call finished, optionally setting client_failed_to_send and
+    // received.
+    grpc_grpclb_client_stats_add_call_finished(
+        !calld->send_initial_metadata_succeeded /* client_failed_to_send */,
+        calld->recv_initial_metadata_succeeded /* known_received */,
+        calld->client_stats);
+    // All done, so unref the stats object.
+    grpc_grpclb_client_stats_unref(calld->client_stats);
+  }
 }

 static void start_transport_stream_op_batch(
     grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
-  // Intercept send_initial_metadata.
-  if (batch->send_initial_metadata) {
-    calld->original_on_complete_for_send = batch->on_complete;
-    GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send, calld,
-                      grpc_schedule_on_exec_ctx);
-    batch->on_complete = &calld->on_complete_for_send;
-  }
-  // Intercept recv_initial_metadata.
-  if (batch->recv_initial_metadata) {
-    calld->original_recv_initial_metadata_ready =
-        batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
-    GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
-                      recv_initial_metadata_ready, calld,
-                      grpc_schedule_on_exec_ctx);
-    batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
-        &calld->recv_initial_metadata_ready;
+  if (calld->client_stats != nullptr) {
+    // Intercept send_initial_metadata.
+    if (batch->send_initial_metadata) {
+      calld->original_on_complete_for_send = batch->on_complete;
+      GRPC_CLOSURE_INIT(&calld->on_complete_for_send, on_complete_for_send,
+                        calld, grpc_schedule_on_exec_ctx);
+      batch->on_complete = &calld->on_complete_for_send;
+    }
+    // Intercept recv_initial_metadata.
+    if (batch->recv_initial_metadata) {
+      calld->original_recv_initial_metadata_ready =
+          batch->payload->recv_initial_metadata.recv_initial_metadata_ready;
+      GRPC_CLOSURE_INIT(&calld->recv_initial_metadata_ready,
+                        recv_initial_metadata_ready, calld,
+                        grpc_schedule_on_exec_ctx);
+      batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
+          &calld->recv_initial_metadata_ready;
+    }
   }
   // Chain to next filter.
   grpc_call_next_op(elem, batch);
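The same null check now gates both the end-of-call accounting in destroy_call_elem and the metadata interception in start_transport_stream_op_batch, so when no stats object is present the filter is a pure pass-through to grpc_call_next_op. A rough self-contained sketch of the interception pattern itself follows: save the batch's completion callback, substitute our own, record the outcome when it fires, then chain to the original. std::function stands in for grpc_closure here, and Batch/CallData are simplified illustrative types, not gRPC API.

    #include <cstdio>
    #include <functional>

    // Simplified stand-ins for a transport batch and the filter's call data.
    struct Batch {
      bool send_initial_metadata = false;
      std::function<void(bool /*ok*/)> on_complete;
    };

    struct CallData {
      bool send_initial_metadata_succeeded = false;
      std::function<void(bool)> original_on_complete_for_send;
    };

    // Save the original completion callback, substitute our own, note the
    // outcome when it fires, then chain to the original.
    void intercept_send(CallData* calld, Batch* batch) {
      if (!batch->send_initial_metadata) return;
      calld->original_on_complete_for_send = batch->on_complete;
      batch->on_complete = [calld](bool ok) {
        if (ok) calld->send_initial_metadata_succeeded = true;  // bookkeeping
        calld->original_on_complete_for_send(ok);               // chain onward
      };
    }

    int main() {
      CallData calld;
      Batch batch;
      batch.send_initial_metadata = true;
      batch.on_complete = [](bool ok) { std::printf("original: ok=%d\n", ok); };
      intercept_send(&calld, &batch);
      batch.on_complete(true);  // the transport reports completion
      std::printf("succeeded=%d\n", calld.send_initial_metadata_succeeded);
    }

The recorded send/recv outcomes are what destroy_call_elem later folds into the client_failed_to_send and known_received counters.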

test/cpp/end2end/grpclb_end2end_test.cc

@@ -220,30 +220,31 @@ class BalancerServiceImpl : public BalancerService {
     if (client_load_reporting_interval_seconds_ > 0) {
       request.Clear();
-      stream->Read(&request);
-      gpr_log(GPR_INFO, "LB[%p]: recv client load report msg: '%s'", this,
-              request.DebugString().c_str());
-      GPR_ASSERT(request.has_client_stats());
-      // We need to acquire the lock here in order to prevent the notify_one
-      // below from firing before its corresponding wait is executed.
-      std::lock_guard<std::mutex> lock(mu_);
-      client_stats_.num_calls_started +=
-          request.client_stats().num_calls_started();
-      client_stats_.num_calls_finished +=
-          request.client_stats().num_calls_finished();
-      client_stats_.num_calls_finished_with_client_failed_to_send +=
-          request.client_stats()
-              .num_calls_finished_with_client_failed_to_send();
-      client_stats_.num_calls_finished_known_received +=
-          request.client_stats().num_calls_finished_known_received();
-      for (const auto& drop_token_count :
-           request.client_stats().calls_finished_with_drop()) {
-        client_stats_
-            .drop_token_counts[drop_token_count.load_balance_token()] +=
-            drop_token_count.num_calls();
+      if (stream->Read(&request)) {
+        gpr_log(GPR_INFO, "LB[%p]: recv client load report msg: '%s'", this,
+                request.DebugString().c_str());
+        GPR_ASSERT(request.has_client_stats());
+        // We need to acquire the lock here in order to prevent the notify_one
+        // below from firing before its corresponding wait is executed.
+        std::lock_guard<std::mutex> lock(mu_);
+        client_stats_.num_calls_started +=
+            request.client_stats().num_calls_started();
+        client_stats_.num_calls_finished +=
+            request.client_stats().num_calls_finished();
+        client_stats_.num_calls_finished_with_client_failed_to_send +=
+            request.client_stats()
+                .num_calls_finished_with_client_failed_to_send();
+        client_stats_.num_calls_finished_known_received +=
+            request.client_stats().num_calls_finished_known_received();
+        for (const auto& drop_token_count :
+             request.client_stats().calls_finished_with_drop()) {
+          client_stats_
+              .drop_token_counts[drop_token_count.load_balance_token()] +=
+              drop_token_count.num_calls();
+        }
+        load_report_ready_ = true;
+        load_report_cond_.notify_one();
       }
-      load_report_ready_ = true;
-      load_report_cond_.notify_one();
     }
   done:
     gpr_log(GPR_INFO, "LB[%p]: done", this);
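On the test side, the fake balancer now checks the return value of stream->Read(): in the gRPC C++ sync API, Read() returns false once the peer half-closes the stream, and the message must not be used after a failed read. Here is a small self-contained sketch of that contract, with a hypothetical FakeStream standing in for grpc::ServerReaderWriter:

    #include <cstdio>
    #include <deque>
    #include <string>

    // Hypothetical stand-in mimicking the contract of a gRPC sync-API
    // stream: Read() fills `msg` and returns true while messages remain,
    // and returns false once the stream is done.
    class FakeStream {
     public:
      explicit FakeStream(std::deque<std::string> msgs)
          : msgs_(std::move(msgs)) {}
      bool Read(std::string* msg) {
        if (msgs_.empty()) return false;  // closed: nothing was read
        *msg = std::move(msgs_.front());
        msgs_.pop_front();
        return true;
      }

     private:
      std::deque<std::string> msgs_;
    };

    int main() {
      FakeStream stream({"report-1", "report-2"});
      std::string request;
      // Only touch `request` when Read() reported success.
      while (stream.Read(&request)) {
        std::printf("recv client load report msg: '%s'\n", request.c_str());
      }
      std::printf("done\n");
    }

The hunk above uses a single guarded Read rather than a loop, since it expects at most one report at that point in the test, but the contract being respected is the same.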
