Modularized the bm_*.py functions

pull/11034/head
ncteisen 8 years ago
parent c7676a3d00
commit ab69ea3f9d
 68  tools/profiling/microbenchmarks/bm_build.py
 56  tools/profiling/microbenchmarks/bm_constants.py
200  tools/profiling/microbenchmarks/bm_diff.py
 77  tools/profiling/microbenchmarks/bm_run.py
  2  tools/profiling/microbenchmarks/bm_speedup.py

tools/profiling/microbenchmarks/bm_build.py
@@ -0,0 +1,68 @@
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Python utility to build opt and counters benchmarks """

import bm_constants

import argparse
import subprocess
import multiprocessing
import os
import shutil

def _args():
  argp = argparse.ArgumentParser(description='Builds microbenchmarks')
  argp.add_argument('-b', '--benchmarks', nargs='+', choices=bm_constants._AVAILABLE_BENCHMARK_TESTS, default=bm_constants._AVAILABLE_BENCHMARK_TESTS)
  argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
  argp.add_argument('-n', '--name', type=str, help='Unique name of this build')
  return argp.parse_args()

def _make_cmd(cfg, jobs, benchmarks):
  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]

def build(name, jobs, benchmarks):
  shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
  subprocess.check_call(['git', 'submodule', 'update'])
  try:
    subprocess.check_call(_make_cmd('opt', jobs, benchmarks))
    subprocess.check_call(_make_cmd('counters', jobs, benchmarks))
  except subprocess.CalledProcessError:
    # A dirty tree can break the incremental build; clean and retry once.
    subprocess.check_call(['make', 'clean'])
    subprocess.check_call(_make_cmd('opt', jobs, benchmarks))
    subprocess.check_call(_make_cmd('counters', jobs, benchmarks))
  os.rename('bins', 'bm_diff_%s' % name)

if __name__ == '__main__':
  args = _args()
  build(args.name, args.jobs, args.benchmarks)
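
Since the build step is now an importable function rather than top-level script code, other tools can call it directly. A minimal sketch of programmatic use (assuming the working directory is the gRPC repository root, where the make targets and the bins/ output directory live; 'bm_error' is one entry from bm_constants._AVAILABLE_BENCHMARK_TESTS):

#!/usr/bin/env python2.7
# Hypothetical caller of the new module; not part of this commit.
import multiprocessing

import bm_build

# Produces a bm_diff_baseline/ tree holding the opt and counters
# binaries, exactly as build() renames 'bins' above. A diff workflow
# would 'git checkout' another revision and build a second named tree.
bm_build.build('baseline', multiprocessing.cpu_count(), ['bm_error'])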

tools/profiling/microbenchmarks/bm_constants.py
@@ -0,0 +1,56 @@
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Configurable constants for the bm_*.py family """

_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
                              'bm_fullstack_streaming_ping_pong',
                              'bm_fullstack_streaming_pump',
                              'bm_closure',
                              'bm_cq',
                              'bm_call_create',
                              'bm_error',
                              'bm_chttp2_hpack',
                              'bm_chttp2_transport',
                              'bm_pollset',
                              'bm_metadata',
                              'bm_fullstack_trickle']

_INTERESTING = (
  'cpu_time',
  'real_time',
  'locks_per_iteration',
  'allocs_per_iteration',
  'writes_per_iteration',
  'atm_cas_per_iteration',
  'atm_add_per_iteration',
  'nows_per_iteration',
)
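
These lists exist so that every bm_*.py tool validates its flags against the same source of truth. A sketch of the consumption pattern (mirroring the _args() helpers in the sibling scripts; the parser description is illustrative):

#!/usr/bin/env python2.7
# Illustrative consumer; the argument names mirror the sibling scripts.
import argparse

import bm_constants

argp = argparse.ArgumentParser(description='Example bm_constants consumer')
# argparse rejects any value outside the shared lists, so every tool
# accepts exactly the same -b and -t vocabularies.
argp.add_argument('-b', '--benchmarks', nargs='+',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS)
argp.add_argument('-t', '--track', nargs='+',
                  choices=sorted(bm_constants._INTERESTING),
                  default=sorted(bm_constants._INTERESTING))
print argp.parse_args(['-b', 'bm_error', '-t', 'cpu_time'])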

tools/profiling/microbenchmarks/bm_diff.py
@@ -28,47 +28,18 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-import sys
-import json
+""" Computes the diff between two bm runs and outputs significant results """
+
 import bm_json
+import bm_constants
+import bm_speedup
+
+import json
 import tabulate
 import argparse
-from scipy import stats
-import subprocess
-import multiprocessing
 import collections
-import pipes
-import os
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
-import comment_on_pr
-import jobset
-import itertools
-import speedup
-import random
-import shutil
-import errno
-
-_INTERESTING = (
-  'cpu_time',
-  'real_time',
-  'locks_per_iteration',
-  'allocs_per_iteration',
-  'writes_per_iteration',
-  'atm_cas_per_iteration',
-  'atm_add_per_iteration',
-  'cli_transport_stalls_per_iteration',
-  'cli_stream_stalls_per_iteration',
-  'svr_transport_stalls_per_iteration',
-  'svr_stream_stalls_per_iteration'
-  'nows_per_iteration',
-)
 
-def changed_ratio(n, o):
-  if float(o) <= .0001: o = 0
-  if float(n) <= .0001: n = 0
-  if o == 0 and n == 0: return 0
-  if o == 0: return 100
-  return (float(n)-float(o))/float(o)
+verbose = False
 
 def median(ary):
   ary = sorted(ary)
@@ -78,91 +49,27 @@ def median(ary):
   else:
     return ary[n/2]
 
-def min_change(pct):
-  return lambda n, o: abs(changed_ratio(n,o)) > pct/100.0
-
-_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
-                              'bm_fullstack_streaming_ping_pong',
-                              'bm_fullstack_streaming_pump',
-                              'bm_closure',
-                              'bm_cq',
-                              'bm_call_create',
-                              'bm_error',
-                              'bm_chttp2_hpack',
-                              'bm_chttp2_transport',
-                              'bm_pollset',
-                              'bm_metadata',
-                              'bm_fullstack_trickle']
-
-argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
-argp.add_argument('-t', '--track',
-                  choices=sorted(_INTERESTING),
-                  nargs='+',
-                  default=sorted(_INTERESTING),
-                  help='Which metrics to track')
-argp.add_argument('-b', '--benchmarks', nargs='+', choices=_AVAILABLE_BENCHMARK_TESTS, default=['bm_cq'])
-argp.add_argument('-d', '--diff_base', type=str)
-argp.add_argument('-r', '--repetitions', type=int, default=1)
-argp.add_argument('-l', '--loops', type=int, default=20)
-argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
-args = argp.parse_args()
-
-assert args.diff_base
-
-def avg(lst):
-  sum = 0.0
-  n = 0.0
-  for el in lst:
-    sum += el
-    n += 1
-  return sum / n
-
-def make_cmd(cfg):
-  return ['make'] + args.benchmarks + [
-      'CONFIG=%s' % cfg, '-j', '%d' % args.jobs]
-
-def build(dest):
-  shutil.rmtree('bm_diff_%s' % dest, ignore_errors=True)
-  subprocess.check_call(['git', 'submodule', 'update'])
-  try:
-    subprocess.check_call(make_cmd('opt'))
-    subprocess.check_call(make_cmd('counters'))
-  except subprocess.CalledProcessError, e:
-    subprocess.check_call(['make', 'clean'])
-    subprocess.check_call(make_cmd('opt'))
-    subprocess.check_call(make_cmd('counters'))
-  os.rename('bins', 'bm_diff_%s' % dest)
-
-def collect1(bm, cfg, ver, idx):
-  cmd = ['bm_diff_%s/%s/%s' % (ver, cfg, bm),
-         '--benchmark_out=%s.%s.%s.%d.json' % (bm, cfg, ver, idx),
-         '--benchmark_out_format=json',
-         '--benchmark_repetitions=%d' % (args.repetitions)
-         ]
-  return jobset.JobSpec(cmd, shortname='%s %s %s %d/%d' % (bm, cfg, ver, idx+1, args.loops),
-                        verbose_success=True, timeout_seconds=None)
-
-build('new')
-
-where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-subprocess.check_call(['git', 'checkout', args.diff_base])
-try:
-  build('old')
-finally:
-  subprocess.check_call(['git', 'checkout', where_am_i])
-  subprocess.check_call(['git', 'submodule', 'update'])
-
-jobs = []
-for loop in range(0, args.loops):
-  jobs.extend(x for x in itertools.chain(
-      (collect1(bm, 'opt', 'new', loop) for bm in args.benchmarks),
-      (collect1(bm, 'counters', 'new', loop) for bm in args.benchmarks),
-      (collect1(bm, 'opt', 'old', loop) for bm in args.benchmarks),
-      (collect1(bm, 'counters', 'old', loop) for bm in args.benchmarks),
-  ))
-random.shuffle(jobs, random.SystemRandom().random)
-jobset.run(jobs, maxjobs=args.jobs)
+def _args():
+  argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
+  argp.add_argument('-t', '--track',
+                    choices=sorted(bm_constants._INTERESTING),
+                    nargs='+',
+                    default=sorted(bm_constants._INTERESTING),
+                    help='Which metrics to track')
+  argp.add_argument('-b', '--benchmarks', nargs='+', choices=bm_constants._AVAILABLE_BENCHMARK_TESTS, default=bm_constants._AVAILABLE_BENCHMARK_TESTS)
+  argp.add_argument('-l', '--loops', type=int, default=20)
+  argp.add_argument('-n', '--new', type=str, help='New benchmark name')
+  argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
+  argp.add_argument('-v', '--verbose', type=bool, help='print details of before/after')
+  args = argp.parse_args()
+  global verbose
+  if args.verbose: verbose = True
+  assert args.new
+  assert args.old
+  return args
+
+def maybe_print(str):
+  if verbose: print str
 
 class Benchmark:
@@ -173,18 +80,18 @@ class Benchmark:
     }
     self.final = {}
 
-  def add_sample(self, data, new):
-    for f in args.track:
+  def add_sample(self, track, data, new):
+    for f in track:
       if f in data:
         self.samples[new][f].append(float(data[f]))
 
-  def process(self):
-    for f in sorted(args.track):
+  def process(self, track):
+    for f in sorted(track):
       new = self.samples[True][f]
       old = self.samples[False][f]
       if not new or not old: continue
       mdn_diff = abs(median(new) - median(old))
-      print '%s: new=%r old=%r mdn_diff=%r' % (f, new, old, mdn_diff)
-      s = speedup.speedup(new, old)
+      maybe_print('%s: new=%r old=%r mdn_diff=%r' % (f, new, old, mdn_diff))
+      s = bm_speedup.speedup(new, old)
       if abs(s) > 3 and mdn_diff > 0.5:
         self.final[f] = '%+d%%' % s
@@ -196,29 +103,17 @@ class Benchmark:
   def row(self, flds):
     return [self.final[f] if f in self.final else '' for f in flds]
 
-def eintr_be_gone(fn):
-  """Run fn until it doesn't stop because of EINTR"""
-  while True:
-    try:
-      return fn()
-    except IOError, e:
-      if e.errno != errno.EINTR:
-        raise
-
 def read_json(filename):
   try:
     with open(filename) as f: return json.loads(f.read())
   except ValueError, e:
     return None
 
-def finalize():
+def finalize(bms, loops, track):
   benchmarks = collections.defaultdict(Benchmark)
-  for bm in args.benchmarks:
-    for loop in range(0, args.loops):
+  for bm in bms:
+    for loop in range(0, loops):
       js_new_ctr = read_json('%s.counters.new.%d.json' % (bm, loop))
       js_new_opt = read_json('%s.opt.new.%d.json' % (bm, loop))
       js_old_ctr = read_json('%s.counters.old.%d.json' % (bm, loop))
@@ -226,22 +121,20 @@ def finalize():
       if js_new_ctr:
         for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-          print row
           name = row['cpp_name']
           if name.endswith('_mean') or name.endswith('_stddev'): continue
-          benchmarks[name].add_sample(row, True)
+          benchmarks[name].add_sample(track, row, True)
       if js_old_ctr:
         for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-          print row
           name = row['cpp_name']
           if name.endswith('_mean') or name.endswith('_stddev'): continue
-          benchmarks[name].add_sample(row, False)
+          benchmarks[name].add_sample(track, row, False)
 
   really_interesting = set()
   for name, bm in benchmarks.items():
-    print name
-    really_interesting.update(bm.process())
+    maybe_print(name)
+    really_interesting.update(bm.process(track))
 
-  fields = [f for f in args.track if f in really_interesting]
+  fields = [f for f in track if f in really_interesting]
   headers = ['Benchmark'] + fields
   rows = []
@@ -249,11 +142,12 @@ def finalize():
     if benchmarks[name].skip(): continue
     rows.append([name] + benchmarks[name].row(fields))
   if rows:
-    text = 'Performance differences noted:\n' + tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
   else:
-    text = 'No significant performance differences'
-  print text
-  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+    return None
 
-eintr_be_gone(finalize)
+if __name__ == '__main__':
+  args = _args()
+  print finalize(args.benchmarks, args.loops, args.track)
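
With the CLI split out into _args(), finalize() is now callable from other code. A sketch of direct use (assuming bm_run.py has already produced <bm>.<config>.<name>.<loop>.json files on disk, and that the run names were literally 'new' and 'old', since finalize() hard-codes those names in its filename patterns):

#!/usr/bin/env python2.7
# Hypothetical programmatic use of the refactored diff; not in this commit.
import bm_constants
import bm_diff

# One loop of bm_error data for runs named 'new' and 'old' is assumed
# to be on disk, e.g. bm_error.counters.new.0.json.
diff = bm_diff.finalize(['bm_error'], 1, bm_constants._INTERESTING)
if diff:
  print 'Performance differences noted:\n' + diff
else:
  print 'No significant performance differences'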

tools/profiling/microbenchmarks/bm_run.py
@@ -0,0 +1,77 @@
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Python utility to run opt and counters benchmarks and save json output """

import bm_constants

import argparse
import multiprocessing
import random
import itertools
import sys
import os

sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import jobset

def _args():
  argp = argparse.ArgumentParser(description='Runs microbenchmarks')
  argp.add_argument('-b', '--benchmarks', nargs='+', choices=bm_constants._AVAILABLE_BENCHMARK_TESTS, default=bm_constants._AVAILABLE_BENCHMARK_TESTS)
  argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
  argp.add_argument('-n', '--name', type=str, help='Unique name of this build')
  argp.add_argument('-r', '--repetitions', type=int, default=1)
  argp.add_argument('-l', '--loops', type=int, default=20)
  return argp.parse_args()

def _collect_bm_data(bm, cfg, name, reps, idx, loops):
  cmd = ['bm_diff_%s/%s/%s' % (name, cfg, bm),
         '--benchmark_out=%s.%s.%s.%d.json' % (bm, cfg, name, idx),
         '--benchmark_out_format=json',
         '--benchmark_repetitions=%d' % (reps)
         ]
  # 'loops' is threaded through rather than read from the module-level
  # 'args', so the function also works when imported from another script.
  return jobset.JobSpec(cmd, shortname='%s %s %s %d/%d' % (bm, cfg, name, idx+1, loops),
                        verbose_success=True, timeout_seconds=None)

def _run_bms(benchmarks, name, loops, reps, jobs):
  bm_jobs = []
  for loop in range(0, loops):
    bm_jobs.extend(x for x in itertools.chain(
        (_collect_bm_data(bm, 'opt', name, reps, loop, loops) for bm in benchmarks),
        (_collect_bm_data(bm, 'counters', name, reps, loop, loops) for bm in benchmarks),
    ))
  random.shuffle(bm_jobs, random.SystemRandom().random)
  jobset.run(bm_jobs, maxjobs=jobs)

if __name__ == '__main__':
  args = _args()
  assert args.name
  _run_bms(args.benchmarks, args.name, args.loops, args.repetitions, args.jobs)
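
Taken together, the pieces recompose into the workflow the old monolithic bm_diff.py hard-coded: build a named tree per revision, run both, then diff. A hypothetical driver sketch (the git checkouts between builds, previously done inside bm_diff.py, are now the caller's job; the script is assumed to sit next to the bm_*.py modules so the jobset import path resolves, and 'origin/master' as the diff base is an illustrative choice):

#!/usr/bin/env python2.7
# Hypothetical end-to-end driver; not part of this commit.
import multiprocessing
import subprocess

import bm_build
import bm_diff
import bm_run

BMS = ['bm_error']
JOBS = multiprocessing.cpu_count()

bm_build.build('new', JOBS, BMS)
subprocess.check_call(['git', 'checkout', 'origin/master'])
try:
  bm_build.build('old', JOBS, BMS)
finally:
  subprocess.check_call(['git', 'checkout', '-'])  # back to where we were

for name in ('new', 'old'):
  bm_run._run_bms(BMS, name, 1, 1, JOBS)  # 1 loop, 1 repetition each

print bm_diff.finalize(BMS, 1, ('cpu_time',))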

tools/profiling/microbenchmarks/bm_speedup.py
@@ -27,6 +27,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+""" The math behind the diff functionality """
+
 from scipy import stats
 import math
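
Only a docstring lands in bm_speedup.py here, but it is the module bm_diff.py leans on to decide whether a delta between the 'new' and 'old' sample sets is real before reporting it. As a rough illustration of the kind of test scipy.stats enables (this is not the module's actual algorithm, just the flavor of significance gating):

#!/usr/bin/env python2.7
# Toy illustration only: gate a reported speedup on a two-sample t-test.
# The test choice and the 0.05 threshold are assumptions for the sketch.
from scipy import stats

def looks_significant(new, old, p_threshold=0.05):
  _, p = stats.ttest_ind(new, old)
  return p < p_threshold

old = [10.1, 10.2, 9.9, 10.0, 10.1]
new = [8.9, 9.1, 9.0, 8.8, 9.2]  # roughly 10% faster
print looks_significant(new, old)  # True: the distributions clearly differ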