2 space indentation

pull/11367/head
ncteisen 8 years ago
parent 251b025b89
commit 07639167fc
  1. tools/profiling/microbenchmarks/bm_diff/bm_build.py (76 lines changed)
  2. tools/profiling/microbenchmarks/bm_diff/bm_constants.py (14 lines changed)
  3. tools/profiling/microbenchmarks/bm_diff/bm_diff.py (292 lines changed)
  4. tools/profiling/microbenchmarks/bm_diff/bm_main.py (178 lines changed)
  5. tools/profiling/microbenchmarks/bm_diff/bm_run.py (142 lines changed)
  6. tools/profiling/microbenchmarks/bm_diff/bm_speedup.py (54 lines changed)

tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -39,50 +39,50 @@ import shutil

def _args():
  argp = argparse.ArgumentParser(description='Builds microbenchmarks')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to build')
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='How many CPUs to dedicate to this task')
  argp.add_argument(
      '-n',
      '--name',
      type=str,
      help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
  )
  args = argp.parse_args()
  assert args.name
  return args

def _make_cmd(cfg, benchmarks, jobs):
  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]

def build(name, benchmarks, jobs):
  shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
  subprocess.check_call(['git', 'submodule', 'update'])
  try:
    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  except subprocess.CalledProcessError, e:
    subprocess.check_call(['make', 'clean'])
    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  os.rename(
      'bins',
      'bm_diff_%s' % name,)

if __name__ == '__main__':
  args = _args()
  build(args.name, args.benchmarks, args.jobs)
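
Note: the flags above (-b, -j, -n) are the whole interface to this build step. A minimal sketch of driving it from another Python tool, assuming the working directory is the gRPC repository root and that 'bm_error' is just an illustrative benchmark choice:

  import subprocess

  # Build only bm_error under the handle 'new', using 4 make jobs.
  subprocess.check_call([
      'python', 'tools/profiling/microbenchmarks/bm_diff/bm_build.py',
      '-b', 'bm_error', '-n', 'new', '-j', '4'
  ])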

tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -30,13 +30,13 @@
""" Configurable constants for the bm_*.py family """

_AVAILABLE_BENCHMARK_TESTS = [
    'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
    'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
    'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
    'bm_metadata', 'bm_fullstack_trickle'
]

_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
                'allocs_per_iteration', 'writes_per_iteration',
                'atm_cas_per_iteration', 'atm_add_per_iteration',
                'nows_per_iteration',)
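
For context, these two tuples are what the other bm_* scripts in this directory use as their argparse choices and defaults. A minimal sketch (not part of this diff) of validating a metric name against the tracked set before handing it to bm_diff:

  import bm_constants

  metric = 'cpu_time'  # illustrative metric name
  assert metric in bm_constants._INTERESTING, 'unknown metric: %s' % metric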

tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -48,168 +48,168 @@ verbose = False

def _median(ary):
  assert (len(ary))
  ary = sorted(ary)
  n = len(ary)
  if n % 2 == 0:
    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
  else:
    return ary[n / 2]

def _args():
  argp = argparse.ArgumentParser(
      description='Perform diff on microbenchmarks')
  argp.add_argument(
      '-t',
      '--track',
      choices=sorted(bm_constants._INTERESTING),
      nargs='+',
      default=sorted(bm_constants._INTERESTING),
      help='Which metrics to track')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to run')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
  )
  argp.add_argument('-n', '--new', type=str, help='New benchmark name')
  argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
  argp.add_argument(
      '-v', '--verbose', type=bool, help='Print details of before/after')
  args = argp.parse_args()
  global verbose
  if args.verbose: verbose = True
  assert args.new
  assert args.old
  return args

def _maybe_print(str):
  if verbose: print str

class Benchmark:

  def __init__(self):
    self.samples = {
        True: collections.defaultdict(list),
        False: collections.defaultdict(list)
    }
    self.final = {}

  def add_sample(self, track, data, new):
    for f in track:
      if f in data:
        self.samples[new][f].append(float(data[f]))

  def process(self, track, new_name, old_name):
    for f in sorted(track):
      new = self.samples[True][f]
      old = self.samples[False][f]
      if not new or not old: continue
      mdn_diff = abs(_median(new) - _median(old))
      _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
                   (f, new_name, new, old_name, old, mdn_diff))
      s = bm_speedup.speedup(new, old)
      if abs(s) > 3 and mdn_diff > 0.5:
        self.final[f] = '%+d%%' % s
    return self.final.keys()

  def skip(self):
    return not self.final

  def row(self, flds):
    return [self.final[f] if f in self.final else '' for f in flds]

def _read_json(filename, badjson_files, nonexistant_files):
  stripped = ".".join(filename.split(".")[:-2])
  try:
    with open(filename) as f:
      return json.loads(f.read())
  except IOError, e:
    if stripped in nonexistant_files:
      nonexistant_files[stripped] += 1
    else:
      nonexistant_files[stripped] = 1
    return None
  except ValueError, e:
    if stripped in badjson_files:
      badjson_files[stripped] += 1
    else:
      badjson_files[stripped] = 1
    return None

def diff(bms, loops, track, old, new):
  benchmarks = collections.defaultdict(Benchmark)

  badjson_files = {}
  nonexistant_files = {}
  for bm in bms:
    for loop in range(0, loops):
      for line in subprocess.check_output(
          ['bm_diff_%s/opt/%s' % (old, bm),
           '--benchmark_list_tests']).splitlines():
        stripped_line = line.strip().replace("/", "_").replace(
            "<", "_").replace(">", "_").replace(", ", "_")
        js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                                (bm, stripped_line, new, loop),
                                badjson_files, nonexistant_files)
        js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
                                (bm, stripped_line, new, loop),
                                badjson_files, nonexistant_files)
        js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                                (bm, stripped_line, old, loop),
                                badjson_files, nonexistant_files)
        js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
                                (bm, stripped_line, old, loop),
                                badjson_files, nonexistant_files)

        if js_new_ctr:
          for row in bm_json.expand_json(js_new_ctr, js_new_opt):
            name = row['cpp_name']
            if name.endswith('_mean') or name.endswith('_stddev'):
              continue
            benchmarks[name].add_sample(track, row, True)
        if js_old_ctr:
          for row in bm_json.expand_json(js_old_ctr, js_old_opt):
            name = row['cpp_name']
            if name.endswith('_mean') or name.endswith('_stddev'):
              continue
            benchmarks[name].add_sample(track, row, False)

  really_interesting = set()
  for name, bm in benchmarks.items():
    _maybe_print(name)
    really_interesting.update(bm.process(track, new, old))
  fields = [f for f in track if f in really_interesting]

  headers = ['Benchmark'] + fields
  rows = []
  for name in sorted(benchmarks.keys()):
    if benchmarks[name].skip(): continue
    rows.append([name] + benchmarks[name].row(fields))
  note = 'Corrupt JSON data (indicates timeout or crash) = %s' % str(
      badjson_files)
  note += '\n\nMissing files (new benchmark) = %s' % str(nonexistant_files)
  if rows:
    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
  else:
    return None, note

if __name__ == '__main__':
  args = _args()
  diff, note = diff(args.benchmarks, args.loops, args.track, args.old,
                    args.new)
  print('%s\n%s' % (note, diff if diff else "No performance differences"))
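
A minimal sketch of calling diff() directly rather than through the command line; the run handles 'old' and 'new' and the loop count of 20 must match what bm_build.py and bm_run.py were given, and the two-benchmark selection is only for illustration:

  import bm_diff

  # Compare the 'old' and 'new' runs produced by bm_run.py, tracking only
  # CPU and wall time across 20 loops of two benchmarks.
  table, note = bm_diff.diff(
      ['bm_error', 'bm_closure'], 20, ['cpu_time', 'real_time'], 'old', 'new')
  print(note)
  print(table if table else 'No performance differences')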

tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -41,108 +41,108 @@ import multiprocessing
import subprocess

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr

def _args():
  argp = argparse.ArgumentParser(
      description='Perform diff on microbenchmarks')
  argp.add_argument(
      '-t',
      '--track',
      choices=sorted(bm_constants._INTERESTING),
      nargs='+',
      default=sorted(bm_constants._INTERESTING),
      help='Which metrics to track')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to run')
  argp.add_argument(
      '-d',
      '--diff_base',
      type=str,
      help='Commit or branch to compare the current one to')
  argp.add_argument(
      '-o',
      '--old',
      default='old',
      type=str,
      help='Name of baseline run to compare to. Ususally just called "old"')
  argp.add_argument(
      '-r',
      '--repetitions',
      type=int,
      default=1,
      help='Number of repetitions to pass to the benchmarks')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loops the benchmarks. More loops cuts down on noise'
  )
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='Number of CPUs to use')
  args = argp.parse_args()
  assert args.diff_base or args.old, "One of diff_base or old must be set!"
  if args.loops < 3:
    print "WARNING: This run will likely be noisy. Increase loops."
  return args

def eintr_be_gone(fn):
  """Run fn until it doesn't stop because of EINTR"""

  def inner(*args):
    while True:
      try:
        return fn(*args)
      except IOError, e:
        if e.errno != errno.EINTR:
          raise

  return inner

def main(args):
  bm_build.build('new', args.benchmarks, args.jobs)

  old = args.old
  if args.diff_base:
    old = 'old'
    where_am_i = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    subprocess.check_call(['git', 'checkout', args.diff_base])
    try:
      bm_build.build('old', args.benchmarks, args.jobs)
    finally:
      subprocess.check_call(['git', 'checkout', where_am_i])
      subprocess.check_call(['git', 'submodule', 'update'])

  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions)
  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions)

  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
                            'new')
  if diff:
    text = 'Performance differences noted:\n' + diff
  else:
    text = 'No significant performance differences'
  print('%s\n%s' % (note, text))
  comment_on_pr.comment_on_pr('```\n%s\n\n%s\n```' % (note, text))

if __name__ == '__main__':
  args = _args()
  main(args)
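
For reference, the end-to-end build/run/diff flow above is normally started with a single invocation of this script. A sketch, assuming it runs from the repository root and that 'master' is the branch being compared against:

  import subprocess

  # Build and run the selected benchmark on HEAD and on the diff base, then
  # diff the two, looping 20 times to reduce noise.
  subprocess.check_call([
      'python', 'tools/profiling/microbenchmarks/bm_diff/bm_main.py',
      '-d', 'master', '-b', 'bm_error', '-l', '20', '-r', '1'
  ])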

tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -40,87 +40,87 @@ import sys
import os

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
        'python_utils'))
import jobset

def _args():
  argp = argparse.ArgumentParser(description='Runs microbenchmarks')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Benchmarks to run')
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='Number of CPUs to use')
  argp.add_argument(
      '-n',
      '--name',
      type=str,
      help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
  )
  argp.add_argument(
      '-r',
      '--repetitions',
      type=int,
      default=1,
      help='Number of repetitions to pass to the benchmarks')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loops the benchmarks. More loops cuts down on noise'
  )
  args = argp.parse_args()
  assert args.name
  if args.loops < 3:
    print "WARNING: This run will likely be noisy. Increase loops to at least 3."
  return args

def _collect_bm_data(bm, cfg, name, reps, idx, loops):
  jobs_list = []
  for line in subprocess.check_output(
      ['bm_diff_%s/%s/%s' % (name, cfg, bm),
       '--benchmark_list_tests']).splitlines():
    stripped_line = line.strip().replace("/", "_").replace(
        "<", "_").replace(">", "_").replace(", ", "_")
    cmd = [
        'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
        line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
        (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
        '--benchmark_repetitions=%d' % (reps)
    ]
    jobs_list.append(
        jobset.JobSpec(
            cmd,
            shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
                                             loops),
            verbose_success=True,
            timeout_seconds=60 * 2))
  return jobs_list

def run(name, benchmarks, jobs, loops, reps):
  jobs_list = []
  for loop in range(0, loops):
    for bm in benchmarks:
      jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
      jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
                                    loops)
  random.shuffle(jobs_list, random.SystemRandom().random)
  jobset.run(jobs_list, maxjobs=jobs)

if __name__ == '__main__':
  args = _args()
  run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions)
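
The --benchmark_out file names built here are exactly what bm_diff.py later reads back, so the handle and loop index have to line up between the two scripts. A minimal sketch of the naming convention for one case (the test name is illustrative):

  # Pattern: <bm>.<test name with / < > , replaced by _>.<cfg>.<name>.<loop>.json
  bm, cfg, name, loop = 'bm_error', 'counters', 'new', 0
  stripped_line = 'BM_ErrorCreateFromStatic'  # illustrative test name
  print('%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg, name, loop))
  # -> bm_error.BM_ErrorCreateFromStatic.counters.new.0.json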

tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -36,39 +36,39 @@ _THRESHOLD = 1e-10

def scale(a, mul):
  return [x * mul for x in a]

def cmp(a, b):
  return stats.ttest_ind(a, b)

def speedup(new, old):
  if (len(set(new))) == 1 and new == old: return 0
  s0, p0 = cmp(new, old)
  if math.isnan(p0): return 0
  if s0 == 0: return 0
  if p0 > _THRESHOLD: return 0
  if s0 < 0:
    pct = 1
    while pct < 101:
      sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
      if sp > 0: break
      if pp > _THRESHOLD: break
      pct += 1
    return -(pct - 1)
  else:
    pct = 1
    while pct < 100000:
      sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
      if sp < 0: break
      if pp > _THRESHOLD: break
      pct += 1
    return pct - 1

if __name__ == "__main__":
  new = [1.0, 1.0, 1.0, 1.0]
  old = [2.0, 2.0, 2.0, 2.0]
  print speedup(new, old)
  print speedup(old, new)
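
bm_diff.py treats the integer returned by speedup() as a percent change and only reports a field when its absolute value exceeds 3 and the median shift is above 0.5. A minimal consumer-side sketch mirroring that gate (the sample timings are illustrative):

  import bm_speedup

  new_samples = [9.8, 9.9, 10.1, 10.0]    # illustrative timings
  old_samples = [12.0, 11.9, 12.2, 12.1]

  s = bm_speedup.speedup(new_samples, old_samples)
  if abs(s) > 3:
    print('significant change: %+d%%' % s)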
