Merge pull request #6158 from jtattermusch/performance_testing_polishing

Improvement to benchmarking
pull/6191/head
Jan Tattermusch 9 years ago
commit 700d36b6df
14 changed files (changed lines in parentheses):

  src/csharp/Grpc.IntegrationTesting/ClientRunners.cs (1)
  src/node/performance/worker.js (2)
  src/node/performance/worker_service_impl.js (3)
  src/proto/grpc/testing/control.proto (41)
  test/cpp/qps/driver.cc (17)
  test/cpp/qps/driver.h (23)
  test/cpp/qps/qps_driver.cc (1)
  test/cpp/qps/qps_json_driver.cc (6)
  test/cpp/qps/report.cc (130)
  test/cpp/qps/report.h (26)
  test/cpp/util/benchmark_config.cc (9)
  tools/run_tests/performance/scenario_config.py (127)
  tools/run_tests/run_performance_tests.py (40)
  tools/run_tests/tests.json (48)

@@ -129,6 +129,7 @@ namespace Grpc.IntegrationTesting
         public ClientRunnerImpl(List<Channel> channels, ClientType clientType, RpcType rpcType, int outstandingRpcsPerChannel, LoadParams loadParams, PayloadConfig payloadConfig, HistogramParams histogramParams)
         {
             GrpcPreconditions.CheckArgument(outstandingRpcsPerChannel > 0, "outstandingRpcsPerChannel");
+            GrpcPreconditions.CheckNotNull(histogramParams, "histogramParams");
             this.channels = new List<Channel>(channels);
             this.clientType = clientType;
             this.rpcType = rpcType;

@@ -33,6 +33,7 @@
 'use strict';
 
+var console = require('console');
 var worker_service_impl = require('./worker_service_impl');
 
 var grpc = require('../../../');
@@ -48,6 +49,7 @@ function runServer(port) {
   var address = '0.0.0.0:' + port;
   server.bind(address, server_creds);
   server.start();
+  console.log('running QPS worker on %s', address);
   return server;
 }

@@ -34,6 +34,7 @@
 'use strict';
 
 var os = require('os');
+var console = require('console');
 var BenchmarkClient = require('./benchmark_client');
 var BenchmarkServer = require('./benchmark_server');
@@ -49,6 +50,7 @@ exports.runClient = function runClient(call) {
     switch (request.argtype) {
       case 'setup':
         var setup = request.setup;
+        console.log('ClientConfig %j', setup);
         client = new BenchmarkClient(setup.server_targets,
                                      setup.client_channels,
                                      setup.histogram_params,
@@ -118,6 +120,7 @@ exports.runServer = function runServer(call) {
     var stats;
     switch (request.argtype) {
       case 'setup':
+        console.log('ServerConfig %j', request.setup);
         server = new BenchmarkServer('[::]', request.setup.port,
                                      request.setup.security_params);
         server.start();

@@ -194,3 +194,44 @@ message Scenario {
 message Scenarios {
   repeated Scenario scenarios = 1;
 }
+
+// Basic summary that can be computed from ClientStats and ServerStats
+// once the scenario has finished.
+message ScenarioResultSummary
+{
+  // Total number of operations per second over all clients.
+  double qps = 1;
+  // QPS per one server core.
+  double qps_per_server_core = 2;
+  // server load based on system_time (0.85 => 85%)
+  double server_system_time = 3;
+  // server load based on user_time (0.85 => 85%)
+  double server_user_time = 4;
+  // client load based on system_time (0.85 => 85%)
+  double client_system_time = 5;
+  // client load based on user_time (0.85 => 85%)
+  double client_user_time = 6;
+
+  // X% latency percentiles (in nanoseconds)
+  double latency_50 = 7;
+  double latency_90 = 8;
+  double latency_95 = 9;
+  double latency_99 = 10;
+  double latency_999 = 11;
+}
+
+// Results of a single benchmark scenario.
+message ScenarioResult {
+  // Inputs used to run the scenario.
+  Scenario scenario = 1;
+  // Histograms from all clients merged into one histogram.
+  HistogramData latencies = 2;
+  // Client stats for each client
+  repeated ClientStats client_stats = 3;
+  // Server stats for each server
+  repeated ServerStats server_stats = 4;
+  // Number of cores available to each server
+  repeated int32 server_cores = 5;
+  // An after-the-fact computed summary
+  ScenarioResultSummary summary = 6;
+}

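For reference, every field of ScenarioResultSummary above is derivable from the raw per-client and per-server stats plus the merged latency histogram that ScenarioResult now carries. The Python sketch below only illustrates that arithmetic; plain dicts and a flat list of latency samples stand in for the ClientStats/ServerStats/HistogramData protos, and compute_summary/percentile are hypothetical helper names, not part of this change.

# Illustrative sketch: compute ScenarioResultSummary-like values from raw
# per-client/per-server stats. Dicts and a flat latency sample list stand in
# for the ClientStats/ServerStats/HistogramData protos.

def percentile(sorted_samples, pct):
    """Nearest-rank percentile of an already-sorted sample list (nanoseconds)."""
    if not sorted_samples:
        return 0.0
    idx = min(len(sorted_samples) - 1, int(pct / 100.0 * len(sorted_samples)))
    return float(sorted_samples[idx])

def compute_summary(latencies_ns, client_stats, server_stats, server_cores):
    samples = sorted(latencies_ns)
    # QPS = merged operation count divided by average client wall time.
    avg_client_wall = sum(s['time_elapsed'] for s in client_stats) / len(client_stats)
    qps = len(samples) / avg_client_wall
    sum_server_wall = sum(s['time_elapsed'] for s in server_stats)
    sum_client_wall = sum(s['time_elapsed'] for s in client_stats)
    return {
        'qps': qps,
        'qps_per_server_core': qps / sum(server_cores),
        # Loads are ratios of CPU time to wall time (0.85 => 85%).
        'server_system_time': sum(s['time_system'] for s in server_stats) / sum_server_wall,
        'server_user_time': sum(s['time_user'] for s in server_stats) / sum_server_wall,
        'client_system_time': sum(s['time_system'] for s in client_stats) / sum_client_wall,
        'client_user_time': sum(s['time_user'] for s in client_stats) / sum_client_wall,
        'latency_50': percentile(samples, 50),
        'latency_90': percentile(samples, 90),
        'latency_95': percentile(samples, 95),
        'latency_99': percentile(samples, 99),
        'latency_999': percentile(samples, 99.9),
    }

The ratios follow the "0.85 => 85%" convention from the field comments, and qps_per_server_core divides by the total core count reported across all servers, mirroring the formulas used by the reporters in test/cpp/qps/report.cc below.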
@@ -343,8 +343,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
 
   // Finish a run
   std::unique_ptr<ScenarioResult> result(new ScenarioResult);
-  result->client_config = result_client_config;
-  result->server_config = result_server_config;
+  Histogram merged_latencies;
+
   gpr_log(GPR_INFO, "Finishing clients");
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
     GPR_ASSERT(client->stream->Write(client_mark));
@@ -353,9 +353,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
     GPR_ASSERT(client->stream->Read(&client_status));
     const auto& stats = client_status.stats();
-    result->latencies.MergeProto(stats.latencies());
-    result->client_resources.emplace_back(
-        stats.time_elapsed(), stats.time_user(), stats.time_system(), -1);
+    merged_latencies.MergeProto(stats.latencies());
+    result->add_client_stats()->CopyFrom(stats);
     GPR_ASSERT(!client->stream->Read(&client_status));
   }
   for (auto client = &clients[0]; client != &clients[num_clients]; client++) {
@@ -363,6 +362,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
   delete[] clients;
 
+  merged_latencies.FillProto(result->mutable_latencies());
+
   gpr_log(GPR_INFO, "Finishing servers");
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
     GPR_ASSERT(server->stream->Write(server_mark));
@@ -370,10 +371,8 @@ std::unique_ptr<ScenarioResult> RunScenario(
   }
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {
     GPR_ASSERT(server->stream->Read(&server_status));
-    const auto& stats = server_status.stats();
-    result->server_resources.emplace_back(
-        stats.time_elapsed(), stats.time_user(), stats.time_system(),
-        server_status.cores());
+    result->add_server_stats()->CopyFrom(server_status.stats());
+    result->add_server_cores(server_status.cores());
     GPR_ASSERT(!server->stream->Read(&server_status));
   }
   for (auto server = &servers[0]; server != &servers[num_servers]; server++) {

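The driver now keeps the raw ClientStats/ServerStats in the result and only merges the per-client latency histograms before filling result->latencies. A rough Python sketch of that bookkeeping follows; it assumes each status is a dict with HistogramData-like fields (count, sum, sum_of_squares, min_seen, max_seen, bucket), which is an illustrative stand-in rather than the exact proto schema.

# Sketch of the driver-side aggregation: merge per-client latency histograms
# into one and keep the raw per-client/per-server stats untouched.

def merge_histograms(histograms):
    merged = {'count': 0, 'sum': 0.0, 'sum_of_squares': 0.0,
              'min_seen': float('inf'), 'max_seen': 0.0, 'bucket': []}
    for h in histograms:
        merged['count'] += h['count']
        merged['sum'] += h['sum']
        merged['sum_of_squares'] += h['sum_of_squares']
        merged['min_seen'] = min(merged['min_seen'], h['min_seen'])
        merged['max_seen'] = max(merged['max_seen'], h['max_seen'])
        # Grow the merged bucket list as needed, then add counts bucket-wise.
        if len(h['bucket']) > len(merged['bucket']):
            merged['bucket'].extend([0] * (len(h['bucket']) - len(merged['bucket'])))
        for i, count in enumerate(h['bucket']):
            merged['bucket'][i] += count
    return merged

def collect_result(client_statuses, server_statuses):
    """Mirrors the new RunScenario bookkeeping, with dicts standing in for protos."""
    result = {'client_stats': [], 'server_stats': [], 'server_cores': []}
    result['latencies'] = merge_histograms(
        [s['stats']['latencies'] for s in client_statuses])
    for s in client_statuses:
        result['client_stats'].append(s['stats'])
    for s in server_statuses:
        result['server_stats'].append(s['stats'])
        result['server_cores'].append(s['cores'])
    return result

Bucket-wise addition is only meaningful because every client is configured with the same histogram_params, which is exactly what the shared HISTOGRAM_PARAMS constant in scenario_config.py below enforces.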
@@ -41,29 +41,6 @@
 namespace grpc {
 namespace testing {
 
-class ResourceUsage {
- public:
-  ResourceUsage(double w, double u, double s, int c)
-      : wall_time_(w), user_time_(u), system_time_(s), cores_(c) {}
-  double wall_time() const { return wall_time_; }
-  double user_time() const { return user_time_; }
-  double system_time() const { return system_time_; }
-  int cores() const { return cores_; }
-
- private:
-  double wall_time_;
-  double user_time_;
-  double system_time_;
-  int cores_;
-};
-
-struct ScenarioResult {
-  Histogram latencies;
-  std::vector<ResourceUsage> client_resources;
-  std::vector<ResourceUsage> server_resources;
-  ClientConfig client_config;
-  ServerConfig server_config;
-};
-
 std::unique_ptr<ScenarioResult> RunScenario(
     const grpc::testing::ClientConfig& client_config, size_t num_clients,

@@ -85,7 +85,6 @@ using grpc::testing::ServerConfig;
 using grpc::testing::ClientType;
 using grpc::testing::ServerType;
 using grpc::testing::RpcType;
-using grpc::testing::ResourceUsage;
 using grpc::testing::SecurityParams;
 
 namespace grpc {

@@ -102,12 +102,16 @@ static void QpsDriver() {
   for (int i = 0; i < scenarios.scenarios_size(); i++) {
     const Scenario &scenario = scenarios.scenarios(i);
     std::cerr << "RUNNING SCENARIO: " << scenario.name() << "\n";
-    const auto result =
+    auto result =
         RunScenario(scenario.client_config(), scenario.num_clients(),
                     scenario.server_config(), scenario.num_servers(),
                     scenario.warmup_seconds(), scenario.benchmark_seconds(),
                     scenario.spawn_local_worker_count());
+
+    // Amend the result with scenario config. Eventually we should adjust
+    // RunScenario contract so we don't need to touch the result here.
+    result->mutable_scenario()->CopyFrom(scenario);
+
     GetReporter()->ReportQPS(*result);
     GetReporter()->ReportQPSPerCore(*result);
     GetReporter()->ReportLatency(*result);

@@ -33,6 +33,11 @@
 
 #include "test/cpp/qps/report.h"
 
+#include <fstream>
+
+#include <google/protobuf/util/json_util.h>
+#include <google/protobuf/util/type_resolver_util.h>
+
 #include <grpc/support/log.h>
 #include "test/cpp/qps/driver.h"
 #include "test/cpp/qps/stats.h"
@@ -40,10 +45,13 @@
 namespace grpc {
 namespace testing {
 
-static double WallTime(ResourceUsage u) { return u.wall_time(); }
-static double UserTime(ResourceUsage u) { return u.user_time(); }
-static double SystemTime(ResourceUsage u) { return u.system_time(); }
-static int Cores(ResourceUsage u) { return u.cores(); }
+static double WallTime(ClientStats s) { return s.time_elapsed(); }
+static double SystemTime(ClientStats s) { return s.time_system(); }
+static double UserTime(ClientStats s) { return s.time_user(); }
+static double ServerWallTime(ServerStats s) { return s.time_elapsed(); }
+static double ServerSystemTime(ServerStats s) { return s.time_system(); }
+static double ServerUserTime(ServerStats s) { return s.time_user(); }
+static int Cores(int n) { return n; }
 
 void CompositeReporter::add(std::unique_ptr<Reporter> reporter) {
   reporters_.emplace_back(std::move(reporter));
@@ -74,102 +82,74 @@ void CompositeReporter::ReportTimes(const ScenarioResult& result) {
 }
 
 void GprLogReporter::ReportQPS(const ScenarioResult& result) {
-  gpr_log(
-      GPR_INFO, "QPS: %.1f",
-      result.latencies.Count() / average(result.client_resources, WallTime));
+  Histogram histogram;
+  histogram.MergeProto(result.latencies());
+  gpr_log(GPR_INFO, "QPS: %.1f",
+          histogram.Count() / average(result.client_stats(), WallTime));
 }
 
 void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
+  Histogram histogram;
+  histogram.MergeProto(result.latencies());
+  auto qps = histogram.Count() / average(result.client_stats(), WallTime);
   gpr_log(GPR_INFO, "QPS: %.1f (%.1f/server core)", qps,
-          qps / sum(result.server_resources, Cores));
+          qps / sum(result.server_cores(), Cores));
 }
 
 void GprLogReporter::ReportLatency(const ScenarioResult& result) {
+  Histogram histogram;
+  histogram.MergeProto(result.latencies());
   gpr_log(GPR_INFO,
           "Latencies (50/90/95/99/99.9%%-ile): %.1f/%.1f/%.1f/%.1f/%.1f us",
-          result.latencies.Percentile(50) / 1000,
-          result.latencies.Percentile(90) / 1000,
-          result.latencies.Percentile(95) / 1000,
-          result.latencies.Percentile(99) / 1000,
-          result.latencies.Percentile(99.9) / 1000);
+          histogram.Percentile(50) / 1000, histogram.Percentile(90) / 1000,
+          histogram.Percentile(95) / 1000, histogram.Percentile(99) / 1000,
+          histogram.Percentile(99.9) / 1000);
 }
 
 void GprLogReporter::ReportTimes(const ScenarioResult& result) {
   gpr_log(GPR_INFO, "Server system time: %.2f%%",
-          100.0 * sum(result.server_resources, SystemTime) /
-              sum(result.server_resources, WallTime));
+          100.0 * sum(result.server_stats(), ServerSystemTime) /
+              sum(result.server_stats(), ServerWallTime));
   gpr_log(GPR_INFO, "Server user time: %.2f%%",
-          100.0 * sum(result.server_resources, UserTime) /
-              sum(result.server_resources, WallTime));
+          100.0 * sum(result.server_stats(), ServerUserTime) /
+              sum(result.server_stats(), ServerWallTime));
   gpr_log(GPR_INFO, "Client system time: %.2f%%",
-          100.0 * sum(result.client_resources, SystemTime) /
-              sum(result.client_resources, WallTime));
+          100.0 * sum(result.client_stats(), SystemTime) /
+              sum(result.client_stats(), WallTime));
   gpr_log(GPR_INFO, "Client user time: %.2f%%",
-          100.0 * sum(result.client_resources, UserTime) /
-              sum(result.client_resources, WallTime));
+          100.0 * sum(result.client_stats(), UserTime) /
+              sum(result.client_stats(), WallTime));
 }
 
-void PerfDbReporter::ReportQPS(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
-
-  perf_db_client_.setQps(qps);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportQPS(const ScenarioResult& result) {
+  std::unique_ptr<google::protobuf::util::TypeResolver> type_resolver(
+      google::protobuf::util::NewTypeResolverForDescriptorPool(
+          "type.googleapis.com",
+          google::protobuf::DescriptorPool::generated_pool()));
+  grpc::string binary;
+  grpc::string json_string;
+  result.SerializeToString(&binary);
+  auto status = BinaryToJsonString(
+      type_resolver.get(), "type.googleapis.com/grpc.testing.ScenarioResult",
+      binary, &json_string);
+  GPR_ASSERT(status.ok());
+
+  std::ofstream output_file(report_file_);
+  output_file << json_string;
+  output_file.close();
 }
 
-void PerfDbReporter::ReportQPSPerCore(const ScenarioResult& result) {
-  auto qps =
-      result.latencies.Count() / average(result.client_resources, WallTime);
-
-  auto qps_per_core = qps / sum(result.server_resources, Cores);
-
-  perf_db_client_.setQps(qps);
-  perf_db_client_.setQpsPerCore(qps_per_core);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportQPSPerCore(const ScenarioResult& result) {
+  // NOP - all reporting is handled by ReportQPS.
 }
 
-void PerfDbReporter::ReportLatency(const ScenarioResult& result) {
-  perf_db_client_.setLatencies(result.latencies.Percentile(50) / 1000,
-                               result.latencies.Percentile(90) / 1000,
-                               result.latencies.Percentile(95) / 1000,
-                               result.latencies.Percentile(99) / 1000,
-                               result.latencies.Percentile(99.9) / 1000);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
+void JsonReporter::ReportLatency(const ScenarioResult& result) {
  // NOP - all reporting is handled by ReportQPS.
 }
 
-void PerfDbReporter::ReportTimes(const ScenarioResult& result) {
-  const double server_system_time = 100.0 *
-                                    sum(result.server_resources, SystemTime) /
-                                    sum(result.server_resources, WallTime);
-  const double server_user_time = 100.0 *
-                                  sum(result.server_resources, UserTime) /
-                                  sum(result.server_resources, WallTime);
-  const double client_system_time = 100.0 *
-                                    sum(result.client_resources, SystemTime) /
-                                    sum(result.client_resources, WallTime);
-  const double client_user_time = 100.0 *
-                                  sum(result.client_resources, UserTime) /
-                                  sum(result.client_resources, WallTime);
-
-  perf_db_client_.setTimes(server_system_time, server_user_time,
-                           client_system_time, client_user_time);
-  perf_db_client_.setConfigs(result.client_config, result.server_config);
-}
-
-void PerfDbReporter::SendData() {
-  // send data to performance database
-  bool data_state =
-      perf_db_client_.sendData(hashed_id_, test_name_, sys_info_, tag_);
-
-  // check state of data sending
-  if (data_state) {
-    gpr_log(GPR_INFO, "Data sent to performance database successfully");
-  } else {
-    gpr_log(GPR_INFO, "Data could not be sent to performance database");
-  }
+void JsonReporter::ReportTimes(const ScenarioResult& result) {
+  // NOP - all reporting is handled by ReportQPS.
 }
 
 }  // namespace testing

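In effect the new JsonReporter serializes the whole ScenarioResult to JSON once, from ReportQPS, and writes it to the file passed at construction; the remaining Report* hooks are deliberate no-ops. A minimal Python sketch of the same shape follows, using a dict plus json.dump in place of the proto-to-JSON conversion the C++ code does with BinaryToJsonString; the class and method names here are illustrative.

# Sketch: a reporter that dumps the whole scenario result as one JSON file
# from its QPS hook and leaves the other hooks empty.
import json

class JsonFileReporter(object):
    def __init__(self, name, report_file):
        self.name = name
        self.report_file = report_file

    def report_qps(self, result):
        # A dict stands in for the ScenarioResult proto.
        with open(self.report_file, 'w') as f:
            json.dump(result, f, indent=2)

    # NOP - all reporting is handled by report_qps.
    def report_qps_per_core(self, result):
        pass

    def report_latency(self, result):
        pass

    def report_times(self, result):
        pass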
@@ -104,33 +104,19 @@ class GprLogReporter : public Reporter {
   void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE;
 };
 
-/** Reporter for performance database tool */
-class PerfDbReporter : public Reporter {
+/** Dumps the report to a JSON file. */
+class JsonReporter : public Reporter {
  public:
-  PerfDbReporter(const string& name, const string& hashed_id,
-                 const string& test_name, const string& sys_info,
-                 const string& server_address, const string& tag)
-      : Reporter(name),
-        hashed_id_(hashed_id),
-        test_name_(test_name),
-        sys_info_(sys_info),
-        tag_(tag) {
-    perf_db_client_.init(grpc::CreateChannel(
-        server_address, grpc::InsecureChannelCredentials()));
-  }
-  ~PerfDbReporter() GRPC_OVERRIDE { SendData(); };
+  JsonReporter(const string& name, const string& report_file)
+      : Reporter(name), report_file_(report_file) {}
 
  private:
-  PerfDbClient perf_db_client_;
-  std::string hashed_id_;
-  std::string test_name_;
-  std::string sys_info_;
-  std::string tag_;
   void ReportQPS(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportQPSPerCore(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportLatency(const ScenarioResult& result) GRPC_OVERRIDE;
   void ReportTimes(const ScenarioResult& result) GRPC_OVERRIDE;
-  void SendData();
+
+  const string report_file_;
 };
 
 }  // namespace testing

@@ -37,8 +37,8 @@
 DEFINE_bool(enable_log_reporter, true,
             "Enable reporting of benchmark results through GprLog");
 
-DEFINE_bool(report_metrics_db, false,
-            "True if metrics to be reported to performance database");
+DEFINE_string(scenario_result_file, "",
+              "Write JSON benchmark report to the file specified.");
 
 DEFINE_string(hashed_id, "", "Hash of the user id");
@@ -71,10 +71,9 @@ static std::shared_ptr<Reporter> InitBenchmarkReporters() {
     composite_reporter->add(
         std::unique_ptr<Reporter>(new GprLogReporter("LogReporter")));
   }
-  if (FLAGS_report_metrics_db) {
+  if (FLAGS_scenario_result_file != "") {
     composite_reporter->add(std::unique_ptr<Reporter>(
-        new PerfDbReporter("PerfDbReporter", FLAGS_hashed_id, FLAGS_test_name,
-                           FLAGS_sys_info, FLAGS_server_address, FLAGS_tag)));
+        new JsonReporter("JsonReporter", FLAGS_scenario_result_file)));
   }
 
   return std::shared_ptr<Reporter>(composite_reporter);

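Reporter selection stays flag-driven: the composite reporter fans each Report* call out to whichever reporters were enabled, with --scenario_result_file now switching on the JSON reporter instead of the old perf-db flag. A small Python sketch of that wiring, with illustrative stand-in classes (GprLogReporter here is just a print-based stub):

# Sketch of the InitBenchmarkReporters wiring: a composite that forwards
# report calls to every enabled reporter.
import json

class GprLogReporter(object):
    """Stand-in for the gpr_log based reporter."""
    def report_qps(self, result):
        print('QPS: %.1f' % result.get('qps', 0.0))

class JsonFileReporter(object):
    """Stand-in for the JsonReporter: dumps the result to a JSON file."""
    def __init__(self, report_file):
        self.report_file = report_file
    def report_qps(self, result):
        with open(self.report_file, 'w') as f:
            json.dump(result, f)

class CompositeReporter(object):
    def __init__(self):
        self.reporters = []
    def add(self, reporter):
        self.reporters.append(reporter)
    def report_qps(self, result):
        for r in self.reporters:
            r.report_qps(result)

def init_benchmark_reporters(enable_log_reporter=True, scenario_result_file=''):
    composite = CompositeReporter()
    if enable_log_reporter:
        composite.add(GprLogReporter())
    if scenario_result_file:
        composite.add(JsonFileReporter(scenario_result_file))
    return composite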
@ -33,6 +33,11 @@ SINGLE_MACHINE_CORES=8
WARMUP_SECONDS=5 WARMUP_SECONDS=5
BENCHMARK_SECONDS=30 BENCHMARK_SECONDS=30
HISTOGRAM_PARAMS = {
'resolution': 0.01,
'max_possible': 60e9,
}
EMPTY_GENERIC_PAYLOAD = { EMPTY_GENERIC_PAYLOAD = {
'bytebuf_params': { 'bytebuf_params': {
'req_size': 0, 'req_size': 0,
@ -83,7 +88,7 @@ class CXXLanguage:
secargs = None secargs = None
yield { yield {
'name': 'generic_async_streaming_ping_pong_%s' 'name': 'cpp_generic_async_streaming_ping_pong_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 1, 'num_clients': 1,
@ -98,6 +103,7 @@ class CXXLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_GENERIC_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_GENERIC_SERVER',
@ -110,7 +116,7 @@ class CXXLanguage:
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield { yield {
'name': 'generic_async_streaming_qps_unconstrained_%s' 'name': 'cpp_generic_async_streaming_qps_unconstrained_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 0, 'num_clients': 0,
@ -125,6 +131,7 @@ class CXXLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_GENERIC_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_GENERIC_SERVER',
@ -137,7 +144,7 @@ class CXXLanguage:
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield { yield {
'name': 'generic_async_streaming_qps_one_server_core_%s' 'name': 'cpp_generic_async_streaming_qps_one_server_core_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 0, 'num_clients': 0,
@ -152,6 +159,7 @@ class CXXLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_GENERIC_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_GENERIC_SERVER',
@ -164,7 +172,7 @@ class CXXLanguage:
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield { yield {
'name': 'protobuf_async_qps_unconstrained_%s' 'name': 'cpp_protobuf_async_streaming_qps_unconstrained_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 0, 'num_clients': 0,
@ -178,20 +186,20 @@ class CXXLanguage:
'load_params': { 'load_params': {
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_SERVER',
'security_params': secargs, 'security_params': secargs,
'core_limit': SINGLE_MACHINE_CORES/2, 'core_limit': SINGLE_MACHINE_CORES/2,
'async_server_threads': 1, 'async_server_threads': 1,
'payload_config': EMPTY_GENERIC_PAYLOAD,
}, },
'warmup_seconds': WARMUP_SECONDS, 'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield { yield {
'name': 'single_channel_throughput_%s' 'name': 'cpp_single_channel_throughput_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 1, 'num_clients': 1,
@ -206,6 +214,7 @@ class CXXLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': BIG_GENERIC_PAYLOAD, 'payload_config': BIG_GENERIC_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_GENERIC_SERVER',
@ -218,7 +227,7 @@ class CXXLanguage:
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield { yield {
'name': 'protobuf_async_ping_pong_%s' 'name': 'cpp_protobuf_async_ping_pong_%s'
% secstr, % secstr,
'num_servers': 1, 'num_servers': 1,
'num_clients': 1, 'num_clients': 1,
@ -233,13 +242,13 @@ class CXXLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_PROTO_PAYLOAD, 'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_SERVER',
'security_params': secargs, 'security_params': secargs,
'core_limit': SINGLE_MACHINE_CORES/2, 'core_limit': SINGLE_MACHINE_CORES/2,
'async_server_threads': 1, 'async_server_threads': 1,
'payload_config': EMPTY_PROTO_PAYLOAD,
}, },
'warmup_seconds': WARMUP_SECONDS, 'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
@ -262,8 +271,9 @@ class CSharpLanguage:
def scenarios(self): def scenarios(self):
# TODO(jtattermusch): add more scenarios # TODO(jtattermusch): add more scenarios
secargs = None
yield { yield {
'name': 'csharp_async_generic_streaming_ping_pong', 'name': 'csharp_generic_async_streaming_ping_pong',
'num_servers': 1, 'num_servers': 1,
'num_clients': 1, 'num_clients': 1,
'client_config': { 'client_config': {
@ -277,17 +287,97 @@ class CSharpLanguage:
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_GENERIC_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_GENERIC_SERVER',
'security_params': secargs, 'security_params': secargs,
'core_limit': SINGLE_MACHINE_CORES/2, 'core_limit': 0,
'async_server_threads': 1, 'async_server_threads': 1,
'payload_config': EMPTY_GENERIC_PAYLOAD, 'payload_config': EMPTY_GENERIC_PAYLOAD,
}, },
'warmup_seconds': WARMUP_SECONDS, 'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS
} }
yield {
'name': 'csharp_protobuf_async_unary_ping_pong',
'num_servers': 1,
'num_clients': 1,
'client_config': {
'client_type': 'ASYNC_CLIENT',
'security_params': secargs,
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'rpc_type': 'UNARY',
'load_params': {
'closed_loop': {}
},
'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
},
'server_config': {
'server_type': 'ASYNC_SERVER',
'security_params': secargs,
'core_limit': 0,
'async_server_threads': 1,
},
'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS
}
yield {
'name': 'csharp_protobuf_sync_to_async_unary_ping_pong',
'num_servers': 1,
'num_clients': 1,
'client_config': {
'client_type': 'SYNC_CLIENT',
'security_params': secargs,
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'rpc_type': 'UNARY',
'load_params': {
'closed_loop': {}
},
'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
},
'server_config': {
'server_type': 'ASYNC_SERVER',
'security_params': secargs,
'core_limit': 0,
'async_server_threads': 1,
},
'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS
}
yield {
'name': 'csharp_to_cpp_protobuf_sync_unary_ping_pong',
'num_servers': 1,
'num_clients': 1,
'client_config': {
'client_type': 'SYNC_CLIENT',
'security_params': secargs,
'outstanding_rpcs_per_channel': 1,
'client_channels': 1,
'async_client_threads': 1,
'rpc_type': 'UNARY',
'load_params': {
'closed_loop': {}
},
'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
},
'server_config': {
'server_type': 'SYNC_SERVER',
'security_params': secargs,
'core_limit': 0,
'async_server_threads': 1,
},
'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS,
'SERVER_LANGUAGE': 'c++' # recognized by run_performance_tests.py
}
def __str__(self): def __str__(self):
return 'csharp' return 'csharp'
@ -307,8 +397,9 @@ class NodeLanguage:
def scenarios(self): def scenarios(self):
# TODO(jtattermusch): add more scenarios # TODO(jtattermusch): add more scenarios
secargs = None
yield { yield {
'name': 'node_sync_unary_ping_pong_protobuf', 'name': 'node_protobuf_unary_ping_pong',
'num_servers': 1, 'num_servers': 1,
'num_clients': 1, 'num_clients': 1,
'client_config': { 'client_config': {
@ -317,18 +408,18 @@ class NodeLanguage:
'outstanding_rpcs_per_channel': 1, 'outstanding_rpcs_per_channel': 1,
'client_channels': 1, 'client_channels': 1,
'async_client_threads': 1, 'async_client_threads': 1,
'rpc_type': 'STREAMING', 'rpc_type': 'UNARY',
'load_params': { 'load_params': {
'closed_loop': {} 'closed_loop': {}
}, },
'payload_config': EMPTY_PROTO_PAYLOAD, 'payload_config': EMPTY_PROTO_PAYLOAD,
'histogram_params': HISTOGRAM_PARAMS,
}, },
'server_config': { 'server_config': {
'server_type': 'ASYNC_GENERIC_SERVER', 'server_type': 'ASYNC_SERVER',
'security_params': secargs, 'security_params': secargs,
'core_limit': SINGLE_MACHINE_CORES/2, 'core_limit': 0,
'async_server_threads': 1, 'async_server_threads': 1,
'payload_config': EMPTY_PROTO_PAYLOAD,
}, },
'warmup_seconds': WARMUP_SECONDS, 'warmup_seconds': WARMUP_SECONDS,
'benchmark_seconds': BENCHMARK_SECONDS 'benchmark_seconds': BENCHMARK_SECONDS

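One note on the shared HISTOGRAM_PARAMS used by every scenario above: with resolution 0.01 and max_possible 60e9 nanoseconds, and assuming the multiplicative bucketing those parameters suggest (each bucket boundary is (1 + resolution) times the previous one), the latency histogram needs on the order of 2,500 buckets to cover samples up to 60 seconds at roughly 1% relative precision. A back-of-the-envelope sketch, with bucket_for as an illustrative helper:

# Rough illustration of what HISTOGRAM_PARAMS implies, assuming multiplicative
# bucketing with factor (1 + resolution) up to max_possible nanoseconds.
import math

RESOLUTION = 0.01      # ~1% relative error per bucket
MAX_POSSIBLE = 60e9    # 60 seconds, in nanoseconds

def bucket_for(latency_ns):
    """Index of the bucket a latency sample would fall into."""
    return int(math.log(max(latency_ns, 1.0), 1.0 + RESOLUTION))

num_buckets = bucket_for(MAX_POSSIBLE) + 1
print('buckets needed: %d' % num_buckets)            # roughly 2,500
print('1 ms falls into bucket %d' % bucket_for(1e6))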
@@ -37,10 +37,12 @@ import json
 import multiprocessing
 import os
 import pipes
+import re
 import subprocess
 import sys
 import tempfile
 import time
+import traceback
 import uuid
 
 import performance.scenario_config as scenario_config
@@ -82,6 +84,8 @@ def create_qpsworker_job(language, shortname=None,
   else:
     host_and_port='localhost:%s' % port
 
+  # TODO(jtattermusch): with some care, we can calculate the right timeout
+  # of a worker from the sum of warmup + benchmark times for all the scenarios
   jobspec = jobset.JobSpec(
       cmdline=cmdline,
       shortname=shortname,
@@ -94,6 +98,7 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None):
   # setting QPS_WORKERS env variable here makes sure it works with SSH too.
   cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver ' % ','.join(workers)
   cmd += '--scenarios_json=%s' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
+  cmd += ' --scenario_result_file=scenario_result.json'
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && %s"' % (user_at_host, cmd)
@@ -221,15 +226,30 @@ def start_qpsworkers(languages, worker_hosts):
           for worker_idx, worker in enumerate(workers)]
 
 
-def create_scenarios(languages, workers_by_lang, remote_host=None):
+def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*'):
   """Create jobspecs for scenarios to run."""
   scenarios = []
   for language in languages:
     for scenario_json in language.scenarios():
-      scenario = create_scenario_jobspec(scenario_json,
-                                         workers_by_lang[str(language)],
-                                         remote_host=remote_host)
-      scenarios.append(scenario)
+      if re.search(args.regex, scenario_json['name']):
+        workers = workers_by_lang[str(language)]
+        # 'SERVER_LANGUAGE' is an indicator for this script to pick
+        # a server in different language. It doesn't belong to the Scenario
+        # schema, so we also need to remove it.
+        custom_server_lang = scenario_json.pop('SERVER_LANGUAGE', None)
+        if custom_server_lang:
+          if not workers_by_lang.get(custom_server_lang, []):
+            print 'Warning: Skipping scenario %s as' % scenario_json['name']
+            print('SERVER_LANGUAGE is set to %s yet the language has '
                  'not been selected with -l' % custom_server_lang)
+            continue
+          for idx in range(0, scenario_json['num_servers']):
+            # replace first X workers by workers of a different language
+            workers[idx] = workers_by_lang[custom_server_lang][idx]
+        scenario = create_scenario_jobspec(scenario_json,
+                                           workers,
+                                           remote_host=remote_host)
+        scenarios.append(scenario)
 
   # the very last scenario requests shutting down the workers.
   all_workers = [worker
@@ -268,6 +288,8 @@ argp.add_argument('--remote_worker_host',
                   nargs='+',
                   default=[],
                   help='Worker hosts where to start QPS workers.')
+argp.add_argument('-r', '--regex', default='.*', type=str,
+                  help='Regex to select scenarios to run.')
 
 args = argp.parse_args()
@@ -295,6 +317,9 @@ build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build
 
 qpsworker_jobs = start_qpsworkers(languages, args.remote_worker_host)
 
+# TODO(jtattermusch): see https://github.com/grpc/grpc/issues/6174
+time.sleep(5)
+
 # get list of worker addresses for each language.
 worker_addresses = dict([(str(language), []) for language in languages])
 for job in qpsworker_jobs:
@@ -303,7 +328,8 @@ for job in qpsworker_jobs:
 try:
   scenarios = create_scenarios(languages,
                                workers_by_lang=worker_addresses,
-                               remote_host=args.remote_driver_host)
+                               remote_host=args.remote_driver_host,
+                               regex=args.regex)
 
   if not scenarios:
     raise Exception('No scenarios to run')
@@ -318,5 +344,7 @@ try:
   jobset.message('FAILED', 'Some of the scenarios failed.',
                  do_newline=True)
   sys.exit(1)
+except:
+  traceback.print_exc()
 finally:
   finish_qps_workers(qpsworker_jobs)

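Restating the new scenario-selection logic outside the diff context: scenario names are filtered by the -r/--regex flag, and a scenario tagged with the out-of-schema 'SERVER_LANGUAGE' key has its first num_servers workers replaced by workers of that language (and is skipped with a warning when that language was not selected with -l). A standalone, slightly simplified Python sketch; the function name and the copied worker list are illustrative, not the script's exact code.

# Standalone restatement of the new scenario selection: regex filter plus
# optional substitution of server workers in a different language.
import re

def select_scenarios(scenario_jsons, workers_by_lang, language, regex='.*'):
    selected = []
    for scenario_json in scenario_jsons:
        if not re.search(regex, scenario_json['name']):
            continue
        workers = list(workers_by_lang[language])  # copy before substituting
        custom_server_lang = scenario_json.pop('SERVER_LANGUAGE', None)
        if custom_server_lang:
            if not workers_by_lang.get(custom_server_lang, []):
                # e.g. csharp_to_cpp_* needs C++ workers selected with -l
                continue
            for idx in range(scenario_json['num_servers']):
                # the first num_servers workers act as servers
                workers[idx] = workers_by_lang[custom_server_lang][idx]
        selected.append((scenario_json, workers))
    return selected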
@ -22035,7 +22035,7 @@
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22056,12 +22056,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_ping_pong_secure" "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22082,12 +22082,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_qps_unconstrained_secure" "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22108,12 +22108,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_qps_one_server_core_secure" "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"protobuf_async_qps_unconstrained_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22134,12 +22134,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:protobuf_async_qps_unconstrained_secure" "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"single_channel_throughput_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_single_channel_throughput_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22160,12 +22160,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:single_channel_throughput_secure" "shortname": "json_run_localhost:cpp_single_channel_throughput_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"protobuf_async_ping_pong_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_protobuf_async_ping_pong_secure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22186,12 +22186,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:protobuf_async_ping_pong_secure" "shortname": "json_run_localhost:cpp_protobuf_async_ping_pong_secure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22212,12 +22212,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_ping_pong_insecure" "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22238,12 +22238,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_qps_unconstrained_insecure" "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 1, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22264,12 +22264,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:generic_async_streaming_qps_one_server_core_insecure" "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"protobuf_async_qps_unconstrained_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 0}'" "'{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 0}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22290,12 +22290,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:protobuf_async_qps_unconstrained_insecure" "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"single_channel_throughput_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_single_channel_throughput_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22316,12 +22316,12 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:single_channel_throughput_insecure" "shortname": "json_run_localhost:cpp_single_channel_throughput_insecure"
}, },
{ {
"args": [ "args": [
"--scenario_json", "--scenario_json",
"'{\"name\": \"protobuf_async_ping_pong_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"server_type\": \"ASYNC_GENERIC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}}, \"num_clients\": 1}'" "'{\"name\": \"cpp_protobuf_async_ping_pong_insecure\", \"warmup_seconds\": 5, \"benchmark_seconds\": 30, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"core_limit\": 4, \"security_params\": null, \"server_type\": \"ASYNC_SERVER\"}, \"client_config\": {\"client_type\": \"ASYNC_CLIENT\", \"security_params\": null, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"load_params\": {\"closed_loop\": {}}, \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}, \"num_clients\": 1}'"
], ],
"boringssl": true, "boringssl": true,
"ci_platforms": [ "ci_platforms": [
@ -22342,7 +22342,7 @@
"posix", "posix",
"windows" "windows"
], ],
"shortname": "json_run_localhost:protobuf_async_ping_pong_insecure" "shortname": "json_run_localhost:cpp_protobuf_async_ping_pong_insecure"
}, },
{ {
"args": [ "args": [
