2 space indentation

pull/11367/head
ncteisen 8 years ago
parent 251b025b89
commit 07639167fc
  1. tools/profiling/microbenchmarks/bm_diff/bm_build.py (76 lines changed)
  2. tools/profiling/microbenchmarks/bm_diff/bm_constants.py (14 lines changed)
  3. tools/profiling/microbenchmarks/bm_diff/bm_diff.py (292 lines changed)
  4. tools/profiling/microbenchmarks/bm_diff/bm_main.py (178 lines changed)
  5. tools/profiling/microbenchmarks/bm_diff/bm_run.py (142 lines changed)
  6. tools/profiling/microbenchmarks/bm_diff/bm_speedup.py (54 lines changed)

@@ -39,50 +39,50 @@ import shutil


def _args():
  argp = argparse.ArgumentParser(description='Builds microbenchmarks')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to build')
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='How many CPUs to dedicate to this task')
  argp.add_argument(
      '-n',
      '--name',
      type=str,
      help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
  )
  args = argp.parse_args()
  assert args.name
  return args


def _make_cmd(cfg, benchmarks, jobs):
  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]


def build(name, benchmarks, jobs):
  shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
  subprocess.check_call(['git', 'submodule', 'update'])
  try:
    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  except subprocess.CalledProcessError, e:
    subprocess.check_call(['make', 'clean'])
    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
    subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
  os.rename(
      'bins',
      'bm_diff_%s' % name,)


if __name__ == '__main__':
  args = _args()
  build(args.name, args.benchmarks, args.jobs)
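
For orientation, a minimal sketch of how this build step can be driven from Python rather than the CLI; the handle 'baseline' and the benchmark subset are illustrative, and it assumes the snippet runs from the gRPC repo root where make and bins/ live:

  # Hedged usage sketch (illustrative handle and benchmark subset).
  import bm_build

  bm_build.build('baseline', ['bm_error', 'bm_closure'], 8)
  # Expected layout afterwards, per build() above:
  #   bm_diff_baseline/opt/<benchmark binaries>
  #   bm_diff_baseline/counters/<benchmark binaries>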

@@ -30,13 +30,13 @@

""" Configurable constants for the bm_*.py family """

_AVAILABLE_BENCHMARK_TESTS = [
    'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
    'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
    'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
    'bm_metadata', 'bm_fullstack_trickle'
]

_INTERESTING = ('cpu_time', 'real_time', 'locks_per_iteration',
                'allocs_per_iteration', 'writes_per_iteration',
                'atm_cas_per_iteration', 'atm_add_per_iteration',
                'nows_per_iteration',)

@@ -48,168 +48,168 @@ verbose = False


def _median(ary):
  assert (len(ary))
  ary = sorted(ary)
  n = len(ary)
  if n % 2 == 0:
    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
  else:
    return ary[n / 2]


def _args():
  argp = argparse.ArgumentParser(
      description='Perform diff on microbenchmarks')
  argp.add_argument(
      '-t',
      '--track',
      choices=sorted(bm_constants._INTERESTING),
      nargs='+',
      default=sorted(bm_constants._INTERESTING),
      help='Which metrics to track')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to run')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loop the benchmarks. Must match what was passed to bm_run.py'
  )
  argp.add_argument('-n', '--new', type=str, help='New benchmark name')
  argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
  argp.add_argument(
      '-v', '--verbose', type=bool, help='Print details of before/after')
  args = argp.parse_args()
  global verbose
  if args.verbose: verbose = True
  assert args.new
  assert args.old
  return args


def _maybe_print(str):
  if verbose: print str


class Benchmark:

  def __init__(self):
    self.samples = {
        True: collections.defaultdict(list),
        False: collections.defaultdict(list)
    }
    self.final = {}

  def add_sample(self, track, data, new):
    for f in track:
      if f in data:
        self.samples[new][f].append(float(data[f]))

  def process(self, track, new_name, old_name):
    for f in sorted(track):
      new = self.samples[True][f]
      old = self.samples[False][f]
      if not new or not old: continue
      mdn_diff = abs(_median(new) - _median(old))
      _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
                   (f, new_name, new, old_name, old, mdn_diff))
      s = bm_speedup.speedup(new, old)
      if abs(s) > 3 and mdn_diff > 0.5:
        self.final[f] = '%+d%%' % s
    return self.final.keys()

  def skip(self):
    return not self.final

  def row(self, flds):
    return [self.final[f] if f in self.final else '' for f in flds]


def _read_json(filename, badjson_files, nonexistant_files):
  stripped = ".".join(filename.split(".")[:-2])
  try:
    with open(filename) as f:
      return json.loads(f.read())
  except IOError, e:
    if stripped in nonexistant_files:
      nonexistant_files[stripped] += 1
    else:
      nonexistant_files[stripped] = 1
    return None
  except ValueError, e:
    if stripped in badjson_files:
      badjson_files[stripped] += 1
    else:
      badjson_files[stripped] = 1
    return None


def diff(bms, loops, track, old, new):
  benchmarks = collections.defaultdict(Benchmark)

  badjson_files = {}
  nonexistant_files = {}
  for bm in bms:
    for loop in range(0, loops):
      for line in subprocess.check_output(
          ['bm_diff_%s/opt/%s' % (old, bm),
           '--benchmark_list_tests']).splitlines():
        stripped_line = line.strip().replace("/", "_").replace(
            "<", "_").replace(">", "_").replace(", ", "_")
        js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                                (bm, stripped_line, new, loop),
                                badjson_files, nonexistant_files)
        js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
                                (bm, stripped_line, new, loop),
                                badjson_files, nonexistant_files)
        js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
                                (bm, stripped_line, old, loop),
                                badjson_files, nonexistant_files)
        js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
                                (bm, stripped_line, old, loop),
                                badjson_files, nonexistant_files)

        if js_new_ctr:
          for row in bm_json.expand_json(js_new_ctr, js_new_opt):
            name = row['cpp_name']
            if name.endswith('_mean') or name.endswith('_stddev'):
              continue
            benchmarks[name].add_sample(track, row, True)
        if js_old_ctr:
          for row in bm_json.expand_json(js_old_ctr, js_old_opt):
            name = row['cpp_name']
            if name.endswith('_mean') or name.endswith('_stddev'):
              continue
            benchmarks[name].add_sample(track, row, False)

  really_interesting = set()
  for name, bm in benchmarks.items():
    _maybe_print(name)
    really_interesting.update(bm.process(track, new, old))
  fields = [f for f in track if f in really_interesting]

  headers = ['Benchmark'] + fields
  rows = []
  for name in sorted(benchmarks.keys()):
    if benchmarks[name].skip(): continue
    rows.append([name] + benchmarks[name].row(fields))
  note = 'Corrupt JSON data (indicates timeout or crash) = %s' % str(
      badjson_files)
  note += '\n\nMissing files (new benchmark) = %s' % str(nonexistant_files)
  if rows:
    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
  else:
    return None, note


if __name__ == '__main__':
  args = _args()
  diff, note = diff(args.benchmarks, args.loops, args.track, args.old,
                    args.new)
  print('%s\n%s' % (note, diff if diff else "No performance differences"))
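
For context, a hedged sketch of the per-field acceptance test applied in Benchmark.process() above; the sample values are made up, and it assumes the snippet runs inside bm_diff.py where _median and bm_speedup are in scope:

  # Hypothetical cpu_time samples; real lists hold loops x repetitions values.
  new_samples = [9.1, 9.0, 9.2, 8.9, 9.0] * 4
  old_samples = [10.0, 10.1, 9.9, 10.0, 10.2] * 4
  s = bm_speedup.speedup(new_samples, old_samples)
  mdn_diff = abs(_median(new_samples) - _median(old_samples))
  # A field only reaches the output table when both gates pass:
  print 's=%r mdn_diff=%r reported=%r' % (s, mdn_diff,
                                          abs(s) > 3 and mdn_diff > 0.5)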

@@ -41,108 +41,108 @@ import multiprocessing
import subprocess

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr


def _args():
  argp = argparse.ArgumentParser(
      description='Perform diff on microbenchmarks')
  argp.add_argument(
      '-t',
      '--track',
      choices=sorted(bm_constants._INTERESTING),
      nargs='+',
      default=sorted(bm_constants._INTERESTING),
      help='Which metrics to track')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Which benchmarks to run')
  argp.add_argument(
      '-d',
      '--diff_base',
      type=str,
      help='Commit or branch to compare the current one to')
  argp.add_argument(
      '-o',
      '--old',
      default='old',
      type=str,
      help='Name of baseline run to compare to. Usually just called "old"')
  argp.add_argument(
      '-r',
      '--repetitions',
      type=int,
      default=1,
      help='Number of repetitions to pass to the benchmarks')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loop the benchmarks. More loops cuts down on noise'
  )
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='Number of CPUs to use')
  args = argp.parse_args()
  assert args.diff_base or args.old, "One of diff_base or old must be set!"
  if args.loops < 3:
    print "WARNING: This run will likely be noisy. Increase loops."
  return args


def eintr_be_gone(fn):
  """Run fn until it doesn't stop because of EINTR"""

  def inner(*args):
    while True:
      try:
        return fn(*args)
      except IOError, e:
        if e.errno != errno.EINTR:
          raise

  return inner


def main(args):

  bm_build.build('new', args.benchmarks, args.jobs)

  old = args.old
  if args.diff_base:
    old = 'old'
    where_am_i = subprocess.check_output(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
    subprocess.check_call(['git', 'checkout', args.diff_base])
    try:
      bm_build.build('old', args.benchmarks, args.jobs)
    finally:
      subprocess.check_call(['git', 'checkout', where_am_i])
      subprocess.check_call(['git', 'submodule', 'update'])

  bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.repetitions)
  bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.repetitions)

  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.track, old,
                            'new')
  if diff:
    text = 'Performance differences noted:\n' + diff
  else:
    text = 'No significant performance differences'
  print('%s\n%s' % (note, text))
  comment_on_pr.comment_on_pr('```\n%s\n\n%s\n```' % (note, text))


if __name__ == '__main__':
  args = _args()
  main(args)
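
As a rough guide, an invocation like python bm_main.py -d master -b bm_error (the branch name is illustrative) boils down to roughly the sequence of calls below; a hedged sketch, not a substitute for the script:

  # Hedged sketch of the pipeline main() drives (arguments are illustrative).
  import bm_build, bm_run, bm_diff, bm_constants

  bms = ['bm_error']
  bm_build.build('new', bms, 8)                 # build the working tree
  # ... git checkout <diff_base>, bm_build.build('old', bms, 8), checkout back ...
  bm_run.run('new', bms, 8, 20, 1)              # jobs=8, loops=20, repetitions=1
  bm_run.run('old', bms, 8, 20, 1)
  diff_table, note = bm_diff.diff(bms, 20, bm_constants._INTERESTING, 'old', 'new')
  print('%s\n%s' % (note, diff_table if diff_table else
                    'No significant performance differences'))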

@@ -40,87 +40,87 @@ import sys
import os

sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
        'python_utils'))
import jobset


def _args():
  argp = argparse.ArgumentParser(description='Runs microbenchmarks')
  argp.add_argument(
      '-b',
      '--benchmarks',
      nargs='+',
      choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
      help='Benchmarks to run')
  argp.add_argument(
      '-j',
      '--jobs',
      type=int,
      default=multiprocessing.cpu_count(),
      help='Number of CPUs to use')
  argp.add_argument(
      '-n',
      '--name',
      type=str,
      help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
  )
  argp.add_argument(
      '-r',
      '--repetitions',
      type=int,
      default=1,
      help='Number of repetitions to pass to the benchmarks')
  argp.add_argument(
      '-l',
      '--loops',
      type=int,
      default=20,
      help='Number of times to loop the benchmarks. More loops cuts down on noise'
  )
  args = argp.parse_args()
  assert args.name
  if args.loops < 3:
    print "WARNING: This run will likely be noisy. Increase loops to at least 3."
  return args


def _collect_bm_data(bm, cfg, name, reps, idx, loops):
  jobs_list = []
  for line in subprocess.check_output(
      ['bm_diff_%s/%s/%s' % (name, cfg, bm),
       '--benchmark_list_tests']).splitlines():
    stripped_line = line.strip().replace("/", "_").replace(
        "<", "_").replace(">", "_").replace(", ", "_")
    cmd = [
        'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
        line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
        (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
        '--benchmark_repetitions=%d' % (reps)
    ]
    jobs_list.append(
        jobset.JobSpec(
            cmd,
            shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
                                             loops),
            verbose_success=True,
            timeout_seconds=60 * 2))
  return jobs_list


def run(name, benchmarks, jobs, loops, reps):
  jobs_list = []
  for loop in range(0, loops):
    for bm in benchmarks:
      jobs_list += _collect_bm_data(bm, 'opt', name, reps, loop, loops)
      jobs_list += _collect_bm_data(bm, 'counters', name, reps, loop,
                                    loops)
  random.shuffle(jobs_list, random.SystemRandom().random)
  jobset.run(jobs_list, maxjobs=jobs)


if __name__ == '__main__':
  args = _args()
  run(args.name, args.benchmarks, args.jobs, args.loops, args.repetitions)
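
One detail worth noting: the --benchmark_out file name written in _collect_bm_data() is exactly what bm_diff.py reads back. A hedged illustration, using a hypothetical test name:

  # Hypothetical values; 'BM_ErrorCreate' stands in for a real benchmark test name.
  bm, test, cfg, name, idx = 'bm_error', 'BM_ErrorCreate', 'counters', 'new', 0
  out = '%s.%s.%s.%s.%d.json' % (bm, test, cfg, name, idx)
  # -> 'bm_error.BM_ErrorCreate.counters.new.0.json'
  # bm_diff._read_json drops the trailing '<loop>.json' so retries group per test:
  stripped = '.'.join(out.split('.')[:-2])
  # -> 'bm_error.BM_ErrorCreate.counters.new'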

@@ -36,39 +36,39 @@ _THRESHOLD = 1e-10


def scale(a, mul):
  return [x * mul for x in a]


def cmp(a, b):
  return stats.ttest_ind(a, b)


def speedup(new, old):
  if (len(set(new))) == 1 and new == old: return 0
  s0, p0 = cmp(new, old)
  if math.isnan(p0): return 0
  if s0 == 0: return 0
  if p0 > _THRESHOLD: return 0
  if s0 < 0:
    pct = 1
    while pct < 101:
      sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
      if sp > 0: break
      if pp > _THRESHOLD: break
      pct += 1
    return -(pct - 1)
  else:
    pct = 1
    while pct < 100000:
      sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
      if sp < 0: break
      if pp > _THRESHOLD: break
      pct += 1
    return pct - 1


if __name__ == "__main__":
  new = [1.0, 1.0, 1.0, 1.0]
  old = [2.0, 2.0, 2.0, 2.0]
  print speedup(new, old)
  print speedup(old, new)
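
In short, speedup() only reports a number when the two sample sets differ at p below _THRESHOLD, then rescales the second sample one percent at a time until the t-test can no longer tell the two apart; the sign follows the direction of the difference. A hedged sketch with synthetic low-noise samples (values are made up):

  # Synthetic samples; real inputs are the per-field lists collected by bm_diff.py.
  import bm_speedup

  faster = [9.00, 9.01, 9.02] * 10     # consistently below the baseline
  slower = [10.00, 10.01, 10.02] * 10
  print bm_speedup.speedup(faster, slower)   # negative percentage (first arg is lower)
  print bm_speedup.speedup(slower, faster)   # positive percentage (first arg is higher)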
