|
|
@@ -113,7 +113,7 @@ def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None,
 
 
 def create_scenario_jobspec(scenario_json, workers, remote_host=None,
-                            bq_result_table=None):
+                            bq_result_table=None, server_cpu_load=0):
   """Runs one scenario using QPS driver."""
   # setting QPS_WORKERS env variable here makes sure it works with SSH too.
   cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
|
|
@@ -121,7 +121,9 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
     cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
   cmd += 'tools/run_tests/performance/run_qps_driver.sh '
   cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json'
+  cmd += '--scenario_result_file=scenario_result.json '
+  if server_cpu_load != 0:
+    cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
   if remote_host:
     user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
     cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
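
For illustration, a minimal runnable sketch of the command string this hunk composes once a load target is set. The driver flags and their constants are taken verbatim from the patch; the worker address and the target of 70 are hypothetical, and the scenarios_json/BQ pieces are elided:

    # Sketch only: the worker address and load target are made up;
    # the flag names and constants come from the hunk above.
    workers = ['localhost:10000']
    server_cpu_load = 70
    cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
    cmd += 'tools/run_tests/performance/run_qps_driver.sh '
    cmd += '--scenario_result_file=scenario_result.json '
    if server_cpu_load != 0:
        cmd += ('--search_param=offered_load --initial_search_value=1000 '
                '--targeted_cpu_load=%d --stride=500 --error_tolerance=0.01'
                % server_cpu_load)
    print(cmd)

Note the one-character change in this hunk: '--scenario_result_file=scenario_result.json' gains a trailing space, without which the search flags appended by the new branch would fuse with the previous argument.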
|
|
@@ -129,7 +131,7 @@ def create_scenario_jobspec(scenario_json, workers, remote_host=None,
   return jobset.JobSpec(
       cmdline=[cmd],
       shortname='qps_json_driver.%s' % scenario_json['name'],
-      timeout_seconds=3*60,
+      timeout_seconds=12*60,
       shell=True,
       verbose_success=True)
 
|
|
|
|
|
|
|
|
|
|
@@ -318,7 +320,7 @@ Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
 
 def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
                      category='all', bq_result_table=None,
-                     netperf=False, netperf_hosts=[]):
+                     netperf=False, netperf_hosts=[], server_cpu_load=0):
   """Create jobspecs for scenarios to run."""
   all_workers = [worker
                  for workers in workers_by_lang.values()
|
|
@@ -379,7 +381,8 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
           create_scenario_jobspec(scenario_json,
                                   [w.host_and_port for w in workers],
                                   remote_host=remote_host,
-                                  bq_result_table=bq_result_table),
+                                  bq_result_table=bq_result_table,
+                                  server_cpu_load=server_cpu_load),
           workers,
           scenario_json['name'])
       scenarios.append(scenario)
|
|
@@ -461,6 +464,9 @@ argp.add_argument('--netperf',
                   action='store_const',
                   const=True,
                   help='Run netperf benchmark as one of the scenarios.')
+argp.add_argument('--server_cpu_load',
+                  default=0, type=int,
+                  help='Select a targeted server cpu load to run. 0 means ignore this flag')
 argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
                   help='Name of XML report file to generate.')
 argp.add_argument('--perf_args',
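
A hypothetical invocation of the new option (the flag itself comes from this patch; the language selection shown assumes the script's existing -l option, and the target value is just an example):

    $ tools/run_tests/run_performance_tests.py -l c++ --server_cpu_load=70

With the default of 0, the 'if server_cpu_load != 0:' branch never fires, so runs that do not pass the flag behave exactly as before.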
|
|
@@ -490,7 +496,6 @@ argp.add_argument('--skip_generate_flamegraphs',
                         'May be useful if "perf_args" arguments do not make sense for '
                         'generating flamegraphs (e.g., "--perf_args=stat ...")'))
 
-
 args = argp.parse_args()
 
 languages = set(scenario_config.LANGUAGES[l]
|
|
@@ -540,7 +545,8 @@ scenarios = create_scenarios(languages,
                              category=args.category,
                              bq_result_table=args.bq_result_table,
                              netperf=args.netperf,
-                             netperf_hosts=args.remote_worker_host)
+                             netperf_hosts=args.remote_worker_host,
+                             server_cpu_load=args.server_cpu_load)
 
 if not scenarios:
   raise Exception('No scenarios to run')
|
|
|