#!/usr/bin/env python
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import cgi
import multiprocessing
import os
import subprocess
import sys
import argparse

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
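
# This script builds and runs gRPC microbenchmarks and collects up to three
# kinds of data per benchmark, selected via --collect: latency profiles,
# perf-based flamegraphs, and plain benchmark summaries. Everything it
# produces is linked into an HTML report written to reports/index.html.
# Typical usage (flag names as defined by the argparse setup below):
#   <this script> --collect summary --benchmarks bm_error bm_closure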

_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
                              'bm_fullstack_streaming_ping_pong',
                              'bm_fullstack_streaming_pump',
                              'bm_closure',
                              'bm_cq',
                              'bm_call_create',
                              'bm_error',
                              'bm_chttp2_hpack',
                              'bm_chttp2_transport',
                              'bm_pollset',
                              'bm_metadata',
                              'bm_fullstack_trickle']

# Location of a local FlameGraph checkout (not referenced again in this file).
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

# Run from the repository root so the relative paths used below
# (bins/, tools/, reports/) resolve correctly.
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
  os.makedirs('reports')

start_port_server.start_port_server()

def fnize(s):
  """Turn a benchmark name into a filename-safe fragment.

  Characters that are awkward in filenames ('<', '>', ',', ' ', '/') are
  replaced with underscores, collapsing runs into a single '_'.
  """
  out = ''
  for c in s:
    if c in '<>, /':
      if len(out) and out[-1] == '_': continue
      out += '_'
    else:
      out += c
  return out

# The HTML report is accumulated into this global string as the collectors
# run; it is finalized and written to reports/index.html at the bottom of
# this file.
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""

def heading(name):
  global index_html
  index_html += "<h1>%s</h1>\n" % name

def link(txt, tgt):
  global index_html
  index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
      cgi.escape(tgt, quote=True), cgi.escape(txt))

def text(txt):
  global index_html
  index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)

def collect_latency(bm_name, args):
  """generate latency profiles"""
  benchmarks = []
  profile_analysis = []
  cleanup = []

  heading('Latency Profiles: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
  for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
                                       '--benchmark_list_tests']).splitlines():
    link(line, '%s.txt' % fnize(line))
    benchmarks.append(
        jobset.JobSpec(['bins/basicprof/%s' % bm_name,
                        '--benchmark_filter=^%s$' % line,
                        '--benchmark_min_time=0.05'],
                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
    profile_analysis.append(
        jobset.JobSpec([sys.executable,
                        'tools/profiling/latency_profile/profile_analyzer.py',
                        '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
    cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
    # periodically flush out the list of jobs: profile_analysis jobs at least
    # consume upwards of five gigabytes of ram in some cases, and so analysing
    # hundreds of them at once is impractical -- but we want at least some
    # concurrency or the work takes too long
    if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
      # run up to half the cpu count: each benchmark can use up to two cores
      # (one for the microbenchmark, one for the data flush)
      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
      benchmarks = []
      profile_analysis = []
      cleanup = []
  # run the remaining benchmarks that weren't flushed
  if len(benchmarks):
    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
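
# Note: collect_perf below shells out to the Linux `perf` tool and to
# tools/run_tests/performance/process_local_perf_flamegraphs.sh, which is
# presumably what turns the recorded perf data into the SVG flamegraphs linked
# from the report (and what relies on the FlameGraph checkout referenced near
# the top of this file).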

def collect_perf(bm_name, args):
  """generate flamegraphs"""
  heading('Flamegraphs: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
  benchmarks = []
  profile_analysis = []
  cleanup = []
  for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
                                       '--benchmark_list_tests']).splitlines():
    link(line, '%s.svg' % fnize(line))
    benchmarks.append(
        jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
                        '-g', '-F', '997',
                        'bins/mutrace/%s' % bm_name,
                        '--benchmark_filter=^%s$' % line,
                        '--benchmark_min_time=10']))
    profile_analysis.append(
        jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
                       environ={
                           'PERF_BASE_NAME': fnize(line),
                           'OUTPUT_DIR': 'reports',
                           'OUTPUT_FILENAME': fnize(line),
                       }))
    cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
    cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
    # periodically flush out the list of jobs: temporary space required for
    # this processing is large
    if len(benchmarks) >= 20:
      # benchmarks are run strictly one at a time here; the analysis and
      # cleanup jobs can use every core
      jobset.run(benchmarks, maxjobs=1)
      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
      benchmarks = []
      profile_analysis = []
      cleanup = []
  # run the remaining benchmarks that weren't flushed
  if len(benchmarks):
    jobset.run(benchmarks, maxjobs=1)
    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
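
# run_summary is shared by the 'summary' collector and the --diff_perf path at
# the bottom of this file: it builds bm_name under the given CONFIG, runs it
# with --benchmark_out so the JSON results land in <base_json_name>.<cfg>.json,
# and returns the benchmark's stdout. It reads the global args for
# --summary_time.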

def run_summary(bm_name, cfg, base_json_name):
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
  cmd = ['bins/%s/%s' % (cfg, bm_name),
         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
         '--benchmark_out_format=json']
  if args.summary_time is not None:
    cmd += ['--benchmark_min_time=%d' % args.summary_time]
  return subprocess.check_output(cmd)

def collect_summary(bm_name, args):
  heading('Summary: %s [no counters]' % bm_name)
  text(run_summary(bm_name, 'opt', bm_name))
  heading('Summary: %s [with counters]' % bm_name)
  text(run_summary(bm_name, 'counters', bm_name))
  if args.bigquery_upload:
    with open('%s.csv' % bm_name, 'w') as f:
      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
                                       '%s.counters.json' % bm_name,
                                       '%s.opt.json' % bm_name]))
    subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])

collectors = {
  'latency': collect_latency,
  'perf': collect_perf,
  'summary': collect_summary,
}
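
# Command-line interface: --collect chooses which of the collectors above run
# for each benchmark named by --benchmarks.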

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c', '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b', '--benchmarks',
                  choices=_AVAILABLE_BENCHMARK_TESTS,
                  default=_AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  type=str,
                  help='Which microbenchmarks should be run')
argp.add_argument('--diff_perf',
                  default=None,
                  type=str,
                  help='Diff microbenchmarks against this git revision')
argp.add_argument('--bigquery_upload',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Upload results from summary collection to bigquery')
argp.add_argument('--summary_time',
                  default=None,
                  type=int,
                  help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
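
# Main driver: run every requested collector over every requested benchmark,
# optionally compare against the revision given by --diff_perf, and always
# finish by writing the accumulated HTML report to reports/index.html.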

try:
  for collect in args.collect:
    for bm_name in args.benchmarks:
      collectors[collect](bm_name, args)
  if args.diff_perf:
    git_comment = 'Performance differences between this PR and %s\\n' % args.diff_perf
    if 'summary' not in args.collect:
      for bm_name in args.benchmarks:
        run_summary(bm_name, 'opt', bm_name)
        run_summary(bm_name, 'counters', bm_name)
    where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    # todo(mattkwong): uncomment this before merging
    # subprocess.check_call(['git', 'checkout', args.diff_perf])
    comparables = []
    subprocess.check_call(['make', 'clean'])
    try:
      for bm_name in args.benchmarks:
        try:
          run_summary(bm_name, 'opt', '%s.old' % bm_name)
          run_summary(bm_name, 'counters', '%s.old' % bm_name)
          comparables.append(bm_name)
        except subprocess.CalledProcessError:
          # a benchmark that fails to build or run at the old revision is
          # simply left out of the comparison
          pass
    finally:
      subprocess.check_call(['git', 'checkout', where_am_i])
    for bm_name in comparables:
      diff = subprocess.check_output(['tools/profiling/microbenchmarks/bm_diff.py',
                                      '%s.counters.json' % bm_name,
                                      '%s.opt.json' % bm_name,
                                      '%s.old.counters.json' % bm_name,
                                      '%s.old.opt.json' % bm_name]).strip()
      if diff:
        heading('Performance diff: %s' % bm_name)
        text(diff)
        git_comment += '```\\nPerformance diff: %s\\n%s\\n```\\n' % (bm_name, diff.replace('\n', '\\n'))
finally:
  if args.diff_perf:
    subprocess.call(['tools/jenkins/comment_on_pr.sh "%s"' % git_comment.replace('`', '\`')],
                    stdout=subprocess.PIPE,
                    shell=True)
  if not os.path.exists('reports'):
    os.makedirs('reports')
  index_html += "</body>\n</html>\n"
  with open('reports/index.html', 'w') as f:
    f.write(index_html)