Eliminate use of lambdas in ResourceUsage processing functions...

Introduce simple getters instead.
Author: Vijay Pai
Branch: pull/2771/head
Parent: ab1dba72dc
Commit: 4d06e2eae9
Changed files:
  test/cpp/qps/driver.cc  (49 lines changed)
  test/cpp/qps/driver.h   (16 lines changed)
  test/cpp/qps/report.cc  (81 lines changed)
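
For context: gcc-4.4 (which the diff's comments target) predates lambda support, which arrived in GCC 4.5, and the stats helpers are templates over an arbitrary functor, so a named static function can stand in wherever a lambda was passed. A minimal self-contained sketch of the pattern, with an explicit loop standing in for the sum() helper used below:

    #include <vector>

    class ResourceUsage {
     public:
      ResourceUsage(double w, double u, double s)
          : wall_time_(w), user_time_(u), system_time_(s) {}
      double wall_time() { return wall_time_; }

     private:
      double wall_time_;
      double user_time_;
      double system_time_;
    };

    // Named accessor usable as a plain function pointer, in place of
    // [](ResourceUsage u) { return u.wall_time; }
    static double WallTime(ResourceUsage u) { return u.wall_time(); }

    int main() {
      std::vector<ResourceUsage> usages;
      usages.emplace_back(10.0, 2.0, 1.0);
      usages.emplace_back(12.0, 3.0, 2.0);
      // Aggregate wall time via the named accessor, no lambda required.
      double total = 0;
      for (std::vector<ResourceUsage>::iterator it = usages.begin();
           it != usages.end(); ++it) {
        total += WallTime(*it);
      }
      return total > 0 ? 0 : 1;
    }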

test/cpp/qps/driver.cc

@@ -77,16 +77,33 @@ static deque<string> get_hosts(const string& name) {
   }
 }
 
+// Namespace for classes and functions used only in RunScenario
+// Using this rather than local definitions to workaround gcc-4.4 limitations
+namespace runsc {
+
+// ClientContext allocator
+static ClientContext* AllocContext(list<ClientContext>* contexts) {
+  contexts->emplace_back();
+  return &contexts->back();
+}
+
+struct ServerData {
+  unique_ptr<Worker::Stub> stub;
+  unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
+};
+
+struct ClientData {
+  unique_ptr<Worker::Stub> stub;
+  unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
+};
+}
+
 std::unique_ptr<ScenarioResult> RunScenario(
     const ClientConfig& initial_client_config, size_t num_clients,
     const ServerConfig& server_config, size_t num_servers, int warmup_seconds,
     int benchmark_seconds, int spawn_local_worker_count) {
-  // ClientContext allocator (all are destroyed at scope exit)
+  // ClientContext allocations (all are destroyed at scope exit)
   list<ClientContext> contexts;
-  auto alloc_context = [&contexts]() {
-    contexts.emplace_back();
-    return &contexts.back();
-  };
 
   // To be added to the result, containing the final configuration used for
   // client and config (including host, etc.)
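
The "gcc-4.4 limitations" comment presumably refers to C++03's rule that function-local types cannot be used as template arguments (relaxed only in C++11, which gcc-4.4 supports incompletely). Moving ServerData and ClientData to namespace scope sidesteps that; a sketch of the distinction (LocalData is hypothetical):

    #include <vector>

    // Namespace-scope type: always a valid template argument.
    struct ServerData {
      int value;
    };

    int main() {
      std::vector<ServerData> servers;  // fine: ServerData has linkage

      // struct LocalData { int value; };   // function-local type
      // std::vector<LocalData> locals;     // ill-formed in C++03;
      //                                    // accepted only since C++11
      servers.push_back(ServerData());
      return static_cast<int>(servers.size()) - 1;
    }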
@@ -131,10 +148,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   workers.resize(num_clients + num_servers);
 
   // Start servers
-  struct ServerData {
-    unique_ptr<Worker::Stub> stub;
-    unique_ptr<ClientReaderWriter<ServerArgs, ServerStatus>> stream;
-  };
+  using runsc::ServerData;
   vector<ServerData> servers;
   for (size_t i = 0; i < num_servers; i++) {
     ServerData sd;
@@ -144,7 +158,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result_server_config = server_config;
     result_server_config.set_host(workers[i]);
     *args.mutable_setup() = server_config;
-    sd.stream = std::move(sd.stub->RunServer(alloc_context()));
+    sd.stream = std::move(sd.stub->RunServer(runsc::AllocContext(&contexts)));
     GPR_ASSERT(sd.stream->Write(args));
     ServerStatus init_status;
     GPR_ASSERT(sd.stream->Read(&init_status));
@@ -162,10 +176,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
 
   // Start clients
-  struct ClientData {
-    unique_ptr<Worker::Stub> stub;
-    unique_ptr<ClientReaderWriter<ClientArgs, ClientStatus>> stream;
-  };
+  using runsc::ClientData;
   vector<ClientData> clients;
   for (size_t i = 0; i < num_clients; i++) {
     ClientData cd;
@@ -175,7 +186,7 @@ std::unique_ptr<ScenarioResult> RunScenario(
     result_client_config = client_config;
     result_client_config.set_host(workers[i + num_servers]);
     *args.mutable_setup() = client_config;
-    cd.stream = std::move(cd.stub->RunTest(alloc_context()));
+    cd.stream = std::move(cd.stub->RunTest(runsc::AllocContext(&contexts)));
     GPR_ASSERT(cd.stream->Write(args));
     ClientStatus init_status;
     GPR_ASSERT(cd.stream->Read(&init_status));
@@ -229,15 +240,15 @@ std::unique_ptr<ScenarioResult> RunScenario(
   for (auto server = servers.begin(); server != servers.end(); server++) {
     GPR_ASSERT(server->stream->Read(&server_status));
     const auto& stats = server_status.stats();
-    result->server_resources.push_back(ResourceUsage{
-        stats.time_elapsed(), stats.time_user(), stats.time_system()});
+    result->server_resources.emplace_back(
+        stats.time_elapsed(), stats.time_user(), stats.time_system());
   }
   for (auto client = clients.begin(); client != clients.end(); client++) {
     GPR_ASSERT(client->stream->Read(&client_status));
     const auto& stats = client_status.stats();
     result->latencies.MergeProto(stats.latencies());
-    result->client_resources.push_back(ResourceUsage{
-        stats.time_elapsed(), stats.time_user(), stats.time_system()});
+    result->client_resources.emplace_back(
+        stats.time_elapsed(), stats.time_user(), stats.time_system());
   }
 
   for (auto client = clients.begin(); client != clients.end(); client++) {
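
The push_back(ResourceUsage{...}) calls relied on brace-initialization of what used to be a plain struct; with the constructor introduced in driver.h below, emplace_back can build the element in place from the arguments instead of copying a temporary. A minimal sketch:

    #include <vector>

    class ResourceUsage {
     public:
      ResourceUsage(double w, double u, double s)
          : wall_time_(w), user_time_(u), system_time_(s) {}

     private:
      double wall_time_;
      double user_time_;
      double system_time_;
    };

    int main() {
      std::vector<ResourceUsage> v;
      // Constructs the element directly inside the vector's storage,
      // forwarding the arguments to ResourceUsage's constructor.
      v.emplace_back(1.5, 0.2, 0.1);
      return static_cast<int>(v.size()) - 1;
    }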

test/cpp/qps/driver.h

@@ -41,10 +41,18 @@
 namespace grpc {
 namespace testing {
 
-struct ResourceUsage {
-  double wall_time;
-  double user_time;
-  double system_time;
+class ResourceUsage {
+ public:
+  ResourceUsage(double w, double u, double s)
+      : wall_time_(w), user_time_(u), system_time_(s) {}
+  double wall_time() { return wall_time_; }
+  double user_time() { return user_time_; }
+  double system_time() { return system_time_; }
+
+ private:
+  double wall_time_;
+  double user_time_;
+  double system_time_;
 };
 
 struct ScenarioResult {
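
One detail worth noting: the new getters are not const-qualified, so they cannot be called through a const reference. The helper functions added in report.cc take ResourceUsage by value, which is why the code still compiles; an illustration (Bad() is hypothetical):

    class ResourceUsage {
     public:
      explicit ResourceUsage(double w) : wall_time_(w) {}
      double wall_time() { return wall_time_; }  // note: not const

     private:
      double wall_time_;
    };

    double WallTime(ResourceUsage u) { return u.wall_time(); }  // copy: fine

    // double Bad(const ResourceUsage& u) { return u.wall_time(); }
    // would fail to compile: non-const member function on a const object.

    int main() {
      ResourceUsage r(1.0);
      return WallTime(r) > 0.0 ? 0 : 1;
    }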

test/cpp/qps/report.cc

@@ -34,11 +34,16 @@
 #include "test/cpp/qps/report.h"
 
 #include <grpc/support/log.h>
+
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/stats.h"
 
 namespace grpc {
 namespace testing {
 
+static double WallTime(ResourceUsage u) { return u.wall_time(); }
+static double UserTime(ResourceUsage u) { return u.user_time(); }
+static double SystemTime(ResourceUsage u) { return u.system_time(); }
+
 void CompositeReporter::add(std::unique_ptr<Reporter> reporter) {
   reporters_.emplace_back(std::move(reporter));
 }
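
These functions are plain unary callables over ResourceUsage, matching what the sum() and average() templates in test/cpp/qps/stats.h expect. Those helpers are not part of this diff; their presumed shape is roughly:

    #include <vector>

    // Presumed shape of the helpers in test/cpp/qps/stats.h (an assumption,
    // not shown in this diff): any callable taking an element and returning
    // double fits, which is why static functions can replace the lambdas.
    template <typename T, typename F>
    double sum(const T& container, F functor) {
      double r = 0;
      for (auto v = container.begin(); v != container.end(); v++) {
        r += functor(*v);
      }
      return r;
    }

    template <typename T, typename F>
    double average(const T& container, F functor) {
      return sum(container, functor) / container.size();
    }

    static double Identity(double d) { return d; }

    int main() {
      std::vector<double> xs{1.0, 2.0, 3.0};
      return average(xs, Identity) == 2.0 ? 0 : 1;
    }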
@@ -68,16 +73,14 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) {
 }
 
 void GprLogReporter::ReportQPS(const ScenarioResult& result) {
-  gpr_log(GPR_INFO, "QPS: %.1f",
-          result.latencies.Count() /
-              average(result.client_resources,
-                      [](ResourceUsage u) { return u.wall_time; }));
+  gpr_log(
+      GPR_INFO, "QPS: %.1f",
+      result.latencies.Count() / average(result.client_resources, WallTime));
 }
 
 void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", qps,
           qps / result.server_config.threads());
@@ -95,40 +98,30 @@ void GprLogReporter::ReportLatency(const ScenarioResult& result) {
 
 void GprLogReporter::ReportTimes(const ScenarioResult& result) {
   gpr_log(GPR_INFO, "Server system time: %.2f%%",
-          100.0 * sum(result.server_resources,
-                      [](ResourceUsage u) { return u.system_time; }) /
-              sum(result.server_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.server_resources, SystemTime) /
+              sum(result.server_resources, WallTime));
   gpr_log(GPR_INFO, "Server user time: %.2f%%",
-          100.0 * sum(result.server_resources,
-                      [](ResourceUsage u) { return u.user_time; }) /
-              sum(result.server_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.server_resources, UserTime) /
+              sum(result.server_resources, WallTime));
   gpr_log(GPR_INFO, "Client system time: %.2f%%",
-          100.0 * sum(result.client_resources,
-                      [](ResourceUsage u) { return u.system_time; }) /
-              sum(result.client_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.client_resources, SystemTime) /
+              sum(result.client_resources, WallTime));
   gpr_log(GPR_INFO, "Client user time: %.2f%%",
-          100.0 * sum(result.client_resources,
-                      [](ResourceUsage u) { return u.user_time; }) /
-              sum(result.client_resources,
-                  [](ResourceUsage u) { return u.wall_time; }));
+          100.0 * sum(result.client_resources, UserTime) /
+              sum(result.client_resources, WallTime));
 }
 
 void PerfDbReporter::ReportQPS(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   perf_db_client_.setQps(qps);
   perf_db_client_.setConfigs(result.client_config, result.server_config);
 }
 
 void PerfDbReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps = result.latencies.Count() /
-             average(result.client_resources,
-                     [](ResourceUsage u) { return u.wall_time; });
+  auto qps =
+      result.latencies.Count() / average(result.client_resources, WallTime);
 
   auto qpsPerCore = qps / result.server_config.threads();
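
The reported percentages in the ReportTimes functions are aggregate ratios: total system (or user) time across all servers or clients, divided by their total wall time. A small worked example:

    #include <cstdio>

    int main() {
      // Two servers: 3.0 s and 1.0 s of system time over 5.0 s of wall time
      // each gives 100.0 * (3.0 + 1.0) / (5.0 + 5.0) = 40.00%.
      double system_times[] = {3.0, 1.0};
      double wall_times[] = {5.0, 5.0};
      double sys = system_times[0] + system_times[1];
      double wall = wall_times[0] + wall_times[1];
      std::printf("Server system time: %.2f%%\n", 100.0 * sys / wall);
      return 0;
    }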
@@ -147,25 +140,17 @@ void PerfDbReporter::ReportLatency(const ScenarioResult& result) {
 }
 
 void PerfDbReporter::ReportTimes(const ScenarioResult& result) {
-  double server_system_time =
-      100.0 * sum(result.server_resources,
-                  [](ResourceUsage u) { return u.system_time; }) /
-      sum(result.server_resources, [](ResourceUsage u) { return u.wall_time; });
-  double server_user_time =
-      100.0 * sum(result.server_resources,
-                  [](ResourceUsage u) { return u.user_time; }) /
-      sum(result.server_resources, [](ResourceUsage u) { return u.wall_time; });
-  double client_system_time =
-      100.0 * sum(result.client_resources,
-                  [](ResourceUsage u) { return u.system_time; }) /
-      sum(result.client_resources, [](ResourceUsage u) { return u.wall_time; });
-  double client_user_time =
-      100.0 * sum(result.client_resources,
-                  [](ResourceUsage u) { return u.user_time; }) /
-      sum(result.client_resources, [](ResourceUsage u) { return u.wall_time; });
-  perf_db_client_.setTimes(server_system_time, server_user_time, client_system_time,
-                           client_user_time);
+  double server_system_time = 100.0 * sum(result.server_resources, SystemTime) /
+                              sum(result.server_resources, WallTime);
+  double server_user_time = 100.0 * sum(result.server_resources, UserTime) /
+                            sum(result.server_resources, WallTime);
+  double client_system_time = 100.0 * sum(result.client_resources, SystemTime) /
+                              sum(result.client_resources, WallTime);
+  double client_user_time = 100.0 * sum(result.client_resources, UserTime) /
+                            sum(result.client_resources, WallTime);
+  perf_db_client_.setTimes(server_system_time, server_user_time,
+                           client_system_time, client_user_time);
 
   perf_db_client_.setConfigs(result.client_config, result.server_config);
 }
