|
|
|
@ -281,18 +281,18 @@ def create_qpsworkers(languages, worker_hosts, perf_cmd=None): |
|
|
|
|
for worker_idx, worker in enumerate(workers)] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def perf_report_processor_job(worker_host, perf_base_name, output_filename): |
|
|
|
|
def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports): |
|
|
|
|
print('Creating perf report collection job for %s' % worker_host) |
|
|
|
|
cmd = '' |
|
|
|
|
if worker_host != 'localhost': |
|
|
|
|
user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host) |
|
|
|
|
cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ |
|
|
|
|
tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \ |
|
|
|
|
% (user_at_host, output_filename, args.flame_graph_reports, perf_base_name) |
|
|
|
|
% (user_at_host, output_filename, flame_graph_reports, perf_base_name) |
|
|
|
|
else: |
|
|
|
|
cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\ |
|
|
|
|
tools/run_tests/performance/process_local_perf_flamegraphs.sh" \ |
|
|
|
|
% (output_filename, args.flame_graph_reports, perf_base_name) |
|
|
|
|
% (output_filename, flame_graph_reports, perf_base_name) |
|
|
|
|
|
|
|
|
|
return jobset.JobSpec(cmdline=cmd, |
|
|
|
|
timeout_seconds=3*60, |
|
|
|
@ -332,7 +332,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*', |
|
|
|
|
|
|
|
|
|
for language in languages: |
|
|
|
|
for scenario_json in language.scenarios(): |
|
|
|
|
if re.search(args.regex, scenario_json['name']): |
|
|
|
|
if re.search(regex, scenario_json['name']): |
|
|
|
|
categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest']) |
|
|
|
|
if category in categories or category == 'all': |
|
|
|
|
workers = workers_by_lang[str(language)][:] |
|
|
|
@ -376,7 +376,7 @@ def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*', |
|
|
|
|
return scenarios |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def finish_qps_workers(jobs): |
|
|
|
|
def finish_qps_workers(jobs, qpsworker_jobs): |
|
|
|
|
"""Waits for given jobs to finish and eventually kills them.""" |
|
|
|
|
retries = 0 |
|
|
|
|
num_killed = 0 |
|
|
|
@ -402,7 +402,7 @@ profile_output_files = [] |
|
|
|
|
# perf reports directory. |
|
|
|
|
# Also, the perf profiles need to be fetched and processed after each scenario
|
|
|
|
# in order to avoid clobbering the output files. |
|
|
|
|
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): |
|
|
|
|
def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports): |
|
|
|
|
perf_report_jobs = [] |
|
|
|
|
global profile_output_files |
|
|
|
|
for host_and_port in hosts_and_base_names: |
|
|
|
@ -411,14 +411,14 @@ def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name): |
|
|
|
|
# from the base filename, create .svg output filename |
|
|
|
|
host = host_and_port.split(':')[0] |
|
|
|
|
profile_output_files.append('%s.svg' % output_filename) |
|
|
|
|
perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename)) |
|
|
|
|
perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports)) |
|
|
|
|
|
|
|
|
|
jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True) |
|
|
|
|
failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1, clear_alarms=False) |
|
|
|
|
jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True) |
|
|
|
|
return failures |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main(): |
|
|
|
|
argp = argparse.ArgumentParser(description='Run performance tests.') |
|
|
|
|
argp.add_argument('-l', '--language', |
|
|
|
|
choices=['all'] + sorted(scenario_config.LANGUAGES.keys()), |
|
|
|
@ -562,7 +562,7 @@ for scenario in scenarios: |
|
|
|
|
six.iteritems(resultset))) |
|
|
|
|
finally: |
|
|
|
|
# Consider qps workers that need to be killed as failures |
|
|
|
|
qps_workers_killed += finish_qps_workers(scenario.workers) |
|
|
|
|
qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs) |
|
|
|
|
|
|
|
|
|
if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs: |
|
|
|
|
workers_and_base_names = {} |
|
|
|
@ -570,7 +570,7 @@ for scenario in scenarios: |
|
|
|
|
if not worker.perf_file_base_name: |
|
|
|
|
raise Exception('using perf buf perf report filename is unspecified') |
|
|
|
|
workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name |
|
|
|
|
perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name) |
|
|
|
|
perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Still write the index.html even if some scenarios failed. |
|
|
|
@ -589,3 +589,6 @@ if total_scenario_failures > 0 or qps_workers_killed > 0: |
|
|
|
|
if perf_report_failures > 0: |
|
|
|
|
print('%s perf profile collection jobs failed' % perf_report_failures) |
|
|
|
|
sys.exit(1) |
|
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
|
|
main() |
|
|
|
|