Eliminate use of lambdas in ResourceUsage processing functions...

Introduce simple getters instead.
pull/2771/head
Vijay Pai 10 years ago
parent ab1dba72dc
commit 4d06e2eae9
Changed files:
  test/cpp/qps/driver.cc   (49 changed lines)
  test/cpp/qps/driver.h    (16 changed lines)
  test/cpp/qps/report.cc   (81 changed lines)
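For orientation before the diffs, here is a small standalone sketch of the pattern this commit applies. It is not the gRPC sources: the sum and average templates below are simplified stand-ins for the helpers in test/cpp/qps/stats.h, and the sample values in main are made up. The point is that ResourceUsage moves from public fields to a constructor plus getters, and the lambdas formerly passed to the stats helpers become named static functions (WallTime, SystemTime, ...), which are plain function pointers that gcc-4.4 accepts without the lambda-support limitations noted in the diff.

#include <vector>

class ResourceUsage {
 public:
  ResourceUsage(double w, double u, double s)
      : wall_time_(w), user_time_(u), system_time_(s) {}
  double wall_time() { return wall_time_; }
  double user_time() { return user_time_; }
  double system_time() { return system_time_; }

 private:
  double wall_time_;
  double user_time_;
  double system_time_;
};

// Simplified stand-ins for the stats.h helpers: any callable mapping an
// element to a double is accepted, so both lambdas and functions work.
template <class T, class F>
double sum(const std::vector<T>& c, F f) {
  double r = 0.0;
  for (const T& x : c) r += f(x);
  return r;
}

template <class T, class F>
double average(const std::vector<T>& c, F f) {
  return sum(c, f) / c.size();
}

// Named accessors replace the per-call-site lambdas.
static double WallTime(ResourceUsage u) { return u.wall_time(); }
static double SystemTime(ResourceUsage u) { return u.system_time(); }

int main() {
  std::vector<ResourceUsage> usages{{10.0, 6.0, 1.0}, {12.0, 7.0, 2.0}};
  // Old style: average(usages, [](ResourceUsage u) { return u.wall_time; });
  double mean_wall = average(usages, WallTime);  // 11.0
  double system_pct =
      100.0 * sum(usages, SystemTime) / sum(usages, WallTime);  // ~13.6
  return (mean_wall > 0 && system_pct > 0) ? 0 : 1;
}

Because the helpers are templated on the callable type, swapping a lambda for a function pointer needs no change on the helper side.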

--- a/test/cpp/qps/driver.cc
+++ b/test/cpp/qps/driver.cc
@@ -77,16 +77,33 @@ static deque<string> get_hosts(const string& name) {
   }
 }
 
+// Namespace for classes and functions used only in RunScenario
+// Using this rather than local definitions to workaround gcc-4.4 limitations
+namespace runsc {
+
+// ClientContext allocator
+static ClientContext* AllocContext(list<ClientContext>* contexts) {
+  contexts->emplace_back();
+  return &contexts->back();
+}
+
+struct ServerData {
+  unique_ptr<Worker::Stub> stub;
+  unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
+};
+
+struct ClientData {
+  unique_ptr<Worker::Stub> stub;
+  unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
+};
+
+}
+
 std::unique_ptr<ScenarioResult> RunScenario(
     const ClientConfig& initial_client_config, size_t num_clients,
     const ServerConfig& server_config, size_t num_servers, int warmup_seconds,
     int benchmark_seconds, int spawn_local_worker_count) {
-  // ClientContext allocator (all are destroyed at scope exit)
+  // ClientContext allocations (all are destroyed at scope exit)
   list<ClientContext> contexts;
-  auto alloc_context = [&contexts]() {
-    contexts.emplace_back();
-    return &contexts.back();
-  };
 
   // To be added to the result, containing the final configuration used for
   // client and config (incluiding host, etc.)
@@ -131,10 +148,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   workers.resize(num_clients + num_servers);
 
   // Start servers
-  struct ServerData {
-    unique_ptr<Worker::Stub> stub;
-    unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
-  };
+  using runsc::ServerData;
   vector<ServerData> servers;
   for (size_t i = 0; i < num_servers; i++) {
     ServerData sd;
@@ -144,7 +158,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result_server_config = server_config;
     result_server_config.set_host(workers[i]);
     *args.mutable_setup() = server_config;
-    sd.stream = std::move(sd.stub->RunServer(alloc_context()));
+    sd.stream = std::move(sd.stub->RunServer(runsc::AllocContext(&contexts)));
     GPR_ASSERT(sd.stream->Write(args));
     ServerStatus init_status;
     GPR_ASSERT(sd.stream->Read(&init_status));
@@ -162,10 +176,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
 
   // Start clients
-  struct ClientData {
-    unique_ptr<Worker::Stub> stub;
-    unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
-  };
+  using runsc::ClientData;
   vector<ClientData> clients;
   for (size_t i = 0; i < num_clients; i++) {
     ClientData cd;
@@ -175,7 +186,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result_client_config = client_config;
     result_client_config.set_host(workers[i + num_servers]);
     *args.mutable_setup() = client_config;
-    cd.stream = std::move(cd.stub->RunTest(alloc_context()));
+    cd.stream = std::move(cd.stub->RunTest(runsc::AllocContext(&contexts)));
     GPR_ASSERT(cd.stream->Write(args));
     ClientStatus init_status;
     GPR_ASSERT(cd.stream->Read(&init_status));
@@ -229,15 +240,15 @@ std::unique_ptr<ScenarioResult> RunScenario(
   for (auto server = servers.begin(); server != servers.end(); server++) {
     GPR_ASSERT(server->stream->Read(&server_status));
     const auto& stats = server_status.stats();
-    result->server_resources.push_back(ResourceUsage{
-        stats.time_elapsed(), stats.time_user(), stats.time_system()});
+    result->server_resources.emplace_back(
+        stats.time_elapsed(), stats.time_user(), stats.time_system());
   }
   for (auto client = clients.begin(); client != clients.end(); client++) {
     GPR_ASSERT(client->stream->Read(&client_status));
     const auto& stats = client_status.stats();
     result->latencies.MergeProto(stats.latencies());
-    result->client_resources.push_back(ResourceUsage{
-        stats.time_elapsed(), stats.time_user(), stats.time_system()});
+    result->client_resources.emplace_back(
+        stats.time_elapsed(), stats.time_user(), stats.time_system());
   }
   for (auto client = clients.begin(); client != clients.end(); client++) {
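A brief note on the AllocContext change above, as a standalone sketch rather than the real code (ClientContext here is a stand-in type, not grpc::ClientContext): the file-level function keeps the behavior of the removed alloc_context lambda because std::list never invalidates pointers to existing elements, so the address returned after emplace_back stays valid for as long as the list in RunScenario lives.

#include <list>

struct ClientContext {};  // stand-in for grpc::ClientContext in this sketch

namespace runsc {
// Allocate a context owned by the caller's list and hand back a pointer
// that std::list keeps stable across later insertions.
static ClientContext* AllocContext(std::list<ClientContext>* contexts) {
  contexts->emplace_back();
  return &contexts->back();
}
}  // namespace runsc

int main() {
  std::list<ClientContext> contexts;  // all destroyed at scope exit
  ClientContext* first = runsc::AllocContext(&contexts);
  ClientContext* second = runsc::AllocContext(&contexts);
  // The first pointer remains valid even after further allocations.
  return (first != second && first == &contexts.front()) ? 0 : 1;
}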

--- a/test/cpp/qps/driver.h
+++ b/test/cpp/qps/driver.h
@@ -41,10 +41,18 @@
 namespace grpc {
 namespace testing {
 
-struct ResourceUsage {
-  double wall_time;
-  double user_time;
-  double system_time;
+class ResourceUsage {
+ public:
+  ResourceUsage(double w, double u, double s)
+      : wall_time_(w), user_time_(u), system_time_(s) {}
+  double wall_time() { return wall_time_; }
+  double user_time() { return user_time_; }
+  double system_time() { return system_time_; }
+
+ private:
+  double wall_time_;
+  double user_time_;
+  double system_time_;
 };
 
 struct ScenarioResult {

--- a/test/cpp/qps/report.cc
+++ b/test/cpp/qps/report.cc
@@ -34,11 +34,16 @@
 #include "test/cpp/qps/report.h"
 #include <grpc/support/log.h>
 
+#include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/stats.h"
 
 namespace grpc {
 namespace testing {
 
+static double WallTime(ResourceUsage u) { return u.wall_time(); }
+static double UserTime(ResourceUsage u) { return u.user_time(); }
+static double SystemTime(ResourceUsage u) { return u.system_time(); }
+
 void CompositeReporter::add(std::unique_ptr<Reporter> reporter) {
   reporters_.emplace_back(std::move(reporter));
 }
@@ -68,16 +73,14 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) {
 }
 
 void GprLogReporter::ReportQPS(const ScenarioResult& result) {
-  gpr_log(GPR_INFO, "QPS: %.1f",
-          result.latencies.Count() /
-              average(result.client_resources,
-                      [](ResourceUsage u) { return u.wall_time; }));
+  gpr_log(
+      GPR_INFO, "QPS: %.1f",
+      result.latencies.Count() / average(result.client_resources, WallTime));
 }
 
 void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", qps,
           qps / result.server_config.threads());
@@ -95,40 +98,30 @@ void GprLogReporter::ReportLatency(const ScenarioResult& result) {
 }
 
 void GprLogReporter::ReportTimes(const ScenarioResult& result) {
   gpr_log(GPR_INFO, "Server system time: %.2f%%",
-          100.0 * sum(result.server_resources,
-                      [](ResourceUsage u) { return u.system_time; }) /
-              sum(result.server_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.server_resources, SystemTime) /
+              sum(result.server_resources, WallTime));
   gpr_log(GPR_INFO, "Server user time: %.2f%%",
-          100.0 * sum(result.server_resources,
-                      [](ResourceUsage u) { return u.user_time; }) /
-              sum(result.server_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.server_resources, UserTime) /
+              sum(result.server_resources, WallTime));
   gpr_log(GPR_INFO, "Client system time: %.2f%%",
-          100.0 * sum(result.client_resources,
-                      [](ResourceUsage u) { return u.system_time; }) /
-              sum(result.client_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.client_resources, SystemTime) /
+              sum(result.client_resources, WallTime));
   gpr_log(GPR_INFO, "Client user time: %.2f%%",
-          100.0 * sum(result.client_resources,
-                      [](ResourceUsage u) { return u.user_time; }) /
-              sum(result.client_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.client_resources, UserTime) /
+              sum(result.client_resources, WallTime));
 }
 
 void PerfDbReporter::ReportQPS(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   perf_db_client_.setQps(qps);
   perf_db_client_.setConfigs(result.client_config, result.server_config);
 }
 
 void PerfDbReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   auto qpsPerCore = qps / result.server_config.threads();
@@ -147,25 +140,17 @@ void PerfDbReporter::ReportLatency(const ScenarioResult& result) {
 }
 
 void PerfDbReporter::ReportTimes(const ScenarioResult& result) {
-  double server_system_time =
-      100.0 * sum(result.server_resources,
-                  [](ResourceUsage u) { return u.system_time; }) /
-      sum(result.server_resources, [](ResourceUsage u) { return u.wall_time; });
-  double server_user_time =
-      100.0 * sum(result.server_resources,
-                  [](ResourceUsage u) { return u.user_time; }) /
-      sum(result.server_resources, [](ResourceUsage u) { return u.wall_time; });
-  double client_system_time =
-      100.0 * sum(result.client_resources,
-                  [](ResourceUsage u) { return u.system_time; }) /
-      sum(result.client_resources, [](ResourceUsage u) { return u.wall_time; });
-  double client_user_time =
-      100.0 * sum(result.client_resources,
-                  [](ResourceUsage u) { return u.user_time; }) /
-      sum(result.client_resources, [](ResourceUsage u) { return u.wall_time; });
-
-  perf_db_client_.setTimes(server_system_time, server_user_time, client_system_time,
-                           client_user_time);
+  double server_system_time = 100.0 * sum(result.server_resources, SystemTime) /
+                              sum(result.server_resources, WallTime);
+  double server_user_time = 100.0 * sum(result.server_resources, UserTime) /
+                            sum(result.server_resources, WallTime);
+  double client_system_time = 100.0 * sum(result.client_resources, SystemTime) /
+                              sum(result.client_resources, WallTime);
+  double client_user_time = 100.0 * sum(result.client_resources, UserTime) /
+                            sum(result.client_resources, WallTime);
+
+  perf_db_client_.setTimes(server_system_time, server_user_time,
+                           client_system_time, client_user_time);
   perf_db_client_.setConfigs(result.client_config, result.server_config);
 }
