[grpc][Gpr_To_Absl_Logging] Migrating from gpr to absl logging - gpr_log

pull/36549/head
tanvi-jagtap 9 months ago
parent 87b71907d6
commit ac4ed91721
Changed files:
  1. test/cpp/qps/BUILD (5 changed lines)
  2. test/cpp/qps/driver.cc (55 changed lines)
  3. test/cpp/qps/report.cc (66 changed lines)

@ -77,7 +77,10 @@ grpc_cc_library(
"driver.h",
"report.h",
],
external_deps = ["absl/log:check"],
external_deps = [
"absl/log:check",
"absl/log:log",
],
deps = [
":histogram",
":parse_json",

@ -25,8 +25,8 @@
#include <unordered_map>
#include <vector>
#include "absl/log/check.h"
#include "google/protobuf/timestamp.pb.h"
#include "third_party/absl/log/check.h"
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
@ -78,13 +78,13 @@ static deque<string> get_workers(const string& env_name) {
}
}
if (out.empty()) {
gpr_log(GPR_ERROR,
"Environment variable \"%s\" does not contain a list of QPS "
"workers to use. Set it to a comma-separated list of "
"hostname:port pairs, starting with hosts that should act as "
"servers. E.g. export "
"%s=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"",
env_name.c_str(), env_name.c_str());
LOG(ERROR) << "Environment variable \"" << env_name
<< "\" does not contain a list of QPS "
"workers to use. Set it to a comma-separated list of "
"hostname:port pairs, starting with hosts that should act as "
"servers. E.g. export "
<< env_name
<< "=\"serverhost1:1234,clienthost1:1234,clienthost2:1234\"";
}
return out;
}
@ -237,7 +237,7 @@ struct ServerData {
static void FinishClients(const std::vector<ClientData>& clients,
const ClientArgs& client_mark) {
gpr_log(GPR_INFO, "Finishing clients");
LOG(INFO) << "Finishing clients";
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
if (!client->stream->Write(client_mark)) {
@ -252,13 +252,13 @@ static void FinishClients(const std::vector<ClientData>& clients,
static void ReceiveFinalStatusFromClients(
const std::vector<ClientData>& clients, Histogram& merged_latencies,
std::unordered_map<int, int64_t>& merged_statuses, ScenarioResult& result) {
gpr_log(GPR_INFO, "Receiving final status from clients");
LOG(INFO) << "Receiving final status from clients";
ClientStatus client_status;
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
// Read the client final status
if (client->stream->Read(&client_status)) {
gpr_log(GPR_INFO, "Received final status from client %zu", i);
LOG(INFO) << "Received final status from client " << i;
const auto& stats = client_status.stats();
merged_latencies.MergeProto(stats.latencies());
for (int i = 0; i < stats.request_results_size(); i++) {
@ -282,7 +282,7 @@ static void ReceiveFinalStatusFromClients(
static void ShutdownClients(const std::vector<ClientData>& clients,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Shutdown clients");
LOG(INFO) << "Shutdown clients";
for (size_t i = 0, i_end = clients.size(); i < i_end; i++) {
auto client = &clients[i];
Status s = client->stream->Finish();
@ -300,7 +300,7 @@ static void ShutdownClients(const std::vector<ClientData>& clients,
static void FinishServers(const std::vector<ServerData>& servers,
const ServerArgs& server_mark) {
gpr_log(GPR_INFO, "Finishing servers");
LOG(INFO) << "Finishing servers";
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
if (!server->stream->Write(server_mark)) {
@ -314,13 +314,13 @@ static void FinishServers(const std::vector<ServerData>& servers,
static void ReceiveFinalStatusFromServer(const std::vector<ServerData>& servers,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Receiving final status from servers");
LOG(INFO) << "Receiving final status from servers";
ServerStatus server_status;
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
// Read the server final status
if (server->stream->Read(&server_status)) {
gpr_log(GPR_INFO, "Received final status from server %zu", i);
LOG(INFO) << "Received final status from server " << i;
result.add_server_stats()->CopyFrom(server_status.stats());
result.add_server_cores(server_status.cores());
// That final status should be the last message on the server stream
@ -334,7 +334,7 @@ static void ReceiveFinalStatusFromServer(const std::vector<ServerData>& servers,
static void ShutdownServers(const std::vector<ServerData>& servers,
ScenarioResult& result) {
gpr_log(GPR_INFO, "Shutdown servers");
LOG(INFO) << "Shutdown servers";
for (size_t i = 0, i_end = servers.size(); i < i_end; i++) {
auto server = &servers[i];
Status s = server->stream->Finish();
@ -430,8 +430,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
ChannelArguments channel_args;
for (size_t i = 0; i < num_servers; i++) {
gpr_log(GPR_INFO, "Starting server on %s (worker #%" PRIuPTR ")",
workers[i].c_str(), i);
LOG(INFO) << "Starting server on " << workers[i] << " (worker #" << i
<< ")";
if (!run_inproc) {
servers[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
workers[i],
@ -487,8 +487,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
size_t channels_allocated = 0;
for (size_t i = 0; i < num_clients; i++) {
const auto& worker = workers[i + num_servers];
gpr_log(GPR_INFO, "Starting client on %s (worker #%" PRIuPTR ")",
worker.c_str(), i + num_servers);
LOG(INFO) << "Starting client on " << worker << " (worker #"
<< i + num_servers << ")";
if (!run_inproc) {
clients[i].stub = WorkerService::NewStub(grpc::CreateTestChannel(
worker,
@ -510,8 +510,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
(client_config.client_channels() - channels_allocated) /
(num_clients - i);
channels_allocated += num_channels;
gpr_log(GPR_DEBUG, "Client %" PRIdPTR " gets %" PRIdPTR " channels", i,
num_channels);
VLOG(2) << "Client " << i << " gets " << num_channels << " channels";
per_client_config.set_client_channels(num_channels);
ClientArgs args;
@ -533,7 +532,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
// Send an initial mark: clients can use this to know that everything is ready
// to start
gpr_log(GPR_INFO, "Initiating");
LOG(INFO) << "Initiating";
ServerArgs server_mark;
server_mark.mutable_mark()->set_reset(true);
ClientArgs client_mark;
@ -555,13 +554,13 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
// Let everything warmup
gpr_log(GPR_INFO, "Warming up");
LOG(INFO) << "Warming up";
gpr_timespec start = gpr_now(GPR_CLOCK_REALTIME);
gpr_sleep_until(
gpr_time_add(start, gpr_time_from_seconds(warmup_seconds, GPR_TIMESPAN)));
// Start a run
gpr_log(GPR_INFO, "Starting");
LOG(INFO) << "Starting";
auto start_time = time(nullptr);
@ -593,7 +592,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
}
// Wait some time
gpr_log(GPR_INFO, "Running");
LOG(INFO) << "Running";
// Use gpr_sleep_until rather than this_thread::sleep_until to support
// compilers that don't work with this_thread
gpr_sleep_until(gpr_time_add(
@ -669,8 +668,8 @@ bool RunQuit(
ctx.set_wait_for_ready(true);
Status s = stub->QuitWorker(&ctx, phony, &phony);
if (!s.ok()) {
gpr_log(GPR_ERROR, "Worker %zu could not be properly quit because %s", i,
s.error_message().c_str());
LOG(ERROR) << "Worker " << i << " could not be properly quit because "
<< s.error_message();
result = false;
}
}

@ -79,58 +79,52 @@ void CompositeReporter::ReportQueriesPerCpuSec(const ScenarioResult& result) {
}
// Logs the scenario's overall queries-per-second. When any requests failed,
// also logs the failed/successful requests-per-second breakdown.
// NOTE(review): the extracted diff lost its +/- markers, leaving both the
// pre-migration gpr_log calls and the post-migration LOG calls in the body
// (which would log everything twice); only the migrated absl form is kept.
void GprLogReporter::ReportQPS(const ScenarioResult& result) {
  LOG(INFO) << "QPS: " << result.summary().qps();
  if (result.summary().failed_requests_per_second() > 0) {
    LOG(INFO) << "failed requests/second: "
              << result.summary().failed_requests_per_second();
    LOG(INFO) << "successful requests/second: "
              << result.summary().successful_requests_per_second();
  }
}
// Logs QPS together with the per-server-core rate.
// NOTE(review): the lost-diff-marker duplicate gpr_log call is removed;
// only the migrated absl LOG form remains.
void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {
  LOG(INFO) << "QPS: " << result.summary().qps() << " ("
            << result.summary().qps_per_server_core() << "/server core)";
}
// Logs the 50/90/95/99/99.9th-percentile latencies, converted from
// nanoseconds to microseconds (divide by 1000).
// NOTE(review): duplicate pre-migration gpr_log call removed (lost diff
// markers); stream insertion uses default float formatting rather than the
// old "%.1f" — precision of the printed values may differ slightly.
void GprLogReporter::ReportLatency(const ScenarioResult& result) {
  LOG(INFO) << "Latencies (50/90/95/99/99.9%-ile): "
            << result.summary().latency_50() / 1000 << "/"
            << result.summary().latency_90() / 1000 << "/"
            << result.summary().latency_95() / 1000 << "/"
            << result.summary().latency_99() / 1000 << "/"
            << result.summary().latency_999() / 1000 << " us";
}
// Logs server/client system and user CPU-time percentages.
// NOTE(review): duplicate pre-migration gpr_log calls removed (lost diff
// markers). The old format string was "%.2f%%" — the migrated LOG lines had
// silently dropped the trailing '%', so it is restored here to keep the
// output reading as a percentage.
void GprLogReporter::ReportTimes(const ScenarioResult& result) {
  LOG(INFO) << "Server system time: " << result.summary().server_system_time()
            << "%";
  LOG(INFO) << "Server user time: " << result.summary().server_user_time()
            << "%";
  LOG(INFO) << "Client system time: " << result.summary().client_system_time()
            << "%";
  LOG(INFO) << "Client user time: " << result.summary().client_user_time()
            << "%";
}
// Logs server CPU usage as a percentage.
// NOTE(review): duplicate pre-migration gpr_log call removed (lost diff
// markers); the trailing '%' from the old "%.2f%%" format is restored.
void GprLogReporter::ReportCpuUsage(const ScenarioResult& result) {
  LOG(INFO) << "Server CPU usage: " << result.summary().server_cpu_usage()
            << "%";
}
// Logs the average number of polls per request on the client and the server.
// NOTE(review): duplicate pre-migration gpr_log calls removed (lost diff
// markers); only the migrated absl LOG form remains.
void GprLogReporter::ReportPollCount(const ScenarioResult& result) {
  LOG(INFO) << "Client Polls per Request: "
            << result.summary().client_polls_per_request();
  LOG(INFO) << "Server Polls per Request: "
            << result.summary().server_polls_per_request();
}
// Logs queries handled per CPU-second on the server and the client.
// NOTE(review): duplicate pre-migration gpr_log calls removed (lost diff
// markers); only the migrated absl LOG form remains.
void GprLogReporter::ReportQueriesPerCpuSec(const ScenarioResult& result) {
  LOG(INFO) << "Server Queries/CPU-sec: "
            << result.summary().server_queries_per_cpu_sec();
  LOG(INFO) << "Client Queries/CPU-sec: "
            << result.summary().client_queries_per_cpu_sec();
}
void JsonReporter::ReportQPS(const ScenarioResult& result) {
@ -170,14 +164,14 @@ void RpcReporter::ReportQPS(const ScenarioResult& result) {
grpc::Status status;
Void phony;
gpr_log(GPR_INFO, "RPC reporter sending scenario result to server");
LOG(INFO) << "RPC reporter sending scenario result to server";
status = stub_->ReportScenario(&context, result, &phony);
if (status.ok()) {
gpr_log(GPR_INFO, "RpcReporter report RPC success!");
LOG(INFO) << "RpcReporter report RPC success!";
} else {
gpr_log(GPR_ERROR, "RpcReporter report RPC: code: %d. message: %s",
status.error_code(), status.error_message().c_str());
LOG(ERROR) << "RpcReporter report RPC: code: " << status.error_code()
<< ". message: " << status.error_message();
}
}

Loading…
Cancel
Save