|
|
@@ -134,36 +134,38 @@ static bool IsSuccess(const Status& s) {
 // Postprocess ScenarioResult and populate result summary.
 static void postprocess_scenario_result(ScenarioResult* result) {
+  // Get latencies from ScenarioResult latencies histogram and populate to
+  // result summary.
   Histogram histogram;
   histogram.MergeProto(result->latencies());
-
-  auto time_estimate = average(result->client_stats(), WallTime);
-  auto qps = histogram.Count() / time_estimate;
-  auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
-
-  result->mutable_summary()->set_qps(qps);
-  result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
   result->mutable_summary()->set_latency_50(histogram.Percentile(50));
   result->mutable_summary()->set_latency_90(histogram.Percentile(90));
   result->mutable_summary()->set_latency_95(histogram.Percentile(95));
   result->mutable_summary()->set_latency_99(histogram.Percentile(99));
   result->mutable_summary()->set_latency_999(histogram.Percentile(99.9));
 
-  auto server_system_time = 100.0 *
-                            sum(result->server_stats(), ServerSystemTime) /
-                            sum(result->server_stats(), ServerWallTime);
-  auto server_user_time = 100.0 * sum(result->server_stats(), ServerUserTime) /
-                          sum(result->server_stats(), ServerWallTime);
-  auto client_system_time = 100.0 * sum(result->client_stats(), SystemTime) /
-                            sum(result->client_stats(), WallTime);
-  auto client_user_time = 100.0 * sum(result->client_stats(), UserTime) /
-                          sum(result->client_stats(), WallTime);
-
-  result->mutable_summary()->set_server_system_time(server_system_time);
-  result->mutable_summary()->set_server_user_time(server_user_time);
-  result->mutable_summary()->set_client_system_time(client_system_time);
-  result->mutable_summary()->set_client_user_time(client_user_time);
+  // Calculate qps and cpu load for each client and then aggregate results for all clients
+  double qps = 0;
+  double client_system_cpu_load = 0, client_user_cpu_load = 0;
+  for (size_t i = 0; i < result->client_stats_size(); i++) {
+    auto client_stat = result->client_stats(i);
+    qps += client_stat.latencies().count() / client_stat.time_elapsed();
+    client_system_cpu_load += client_stat.time_system() / client_stat.time_elapsed();
+    client_user_cpu_load += client_stat.time_user() / client_stat.time_elapsed();
+  }
+  // Calculate cpu load for each server and then aggregate results for all servers
+  double server_system_cpu_load = 0, server_user_cpu_load = 0;
+  for (size_t i = 0; i < result->server_stats_size(); i++) {
+    auto server_stat = result->server_stats(i);
+    server_system_cpu_load += server_stat.time_system() / server_stat.time_elapsed();
+    server_user_cpu_load += server_stat.time_user() / server_stat.time_elapsed();
+  }
+  result->mutable_summary()->set_qps(qps);
+  // Populate the percentage of cpu load to result summary.
+  result->mutable_summary()->set_server_system_time(100 * server_system_cpu_load);
+  result->mutable_summary()->set_server_user_time(100 * server_user_cpu_load);
+  result->mutable_summary()->set_client_system_time(100 * client_system_cpu_load);
+  result->mutable_summary()->set_client_user_time(100 * client_user_cpu_load);
 
   // For Non-linux platform, get_cpu_usage() is not implemented. Thus,
   // ServerTotalCpuTime and ServerIdleCpuTime are both 0.
@@ -176,6 +178,9 @@ static void postprocess_scenario_result(ScenarioResult* result) {
     result->mutable_summary()->set_server_cpu_usage(server_cpu_usage);
   }
 
+  // Calculate and populate successful requests per second and failed requests
+  // per second to result summary.
+  auto time_estimate = average(result->client_stats(), WallTime);
   if (result->request_results_size() > 0) {
     int64_t successes = 0;
     int64_t failures = 0;
@@ -193,6 +198,9 @@ static void postprocess_scenario_result(ScenarioResult* result) {
                                              time_estimate);
   }
 
+  // Fill in data for other metrics required in result summary
+  auto qps_per_server_core = qps / sum(result->server_cores(), Cores);
+  result->mutable_summary()->set_qps_per_server_core(qps_per_server_core);
   result->mutable_summary()->set_client_polls_per_request(
       sum(result->client_stats(), CliPollCount) / histogram.Count());
   result->mutable_summary()->set_server_polls_per_request(