diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 49dc39009f5..705f1babfae 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -27,6 +27,7 @@ import bm_json
 
 columns = []
 
 for row in json.loads(
+        # TODO(jtattermusch): make sure the dataset name is not hardcoded
         subprocess.check_output(
             ['bq', '--format=json', 'show', 'microbenchmarks.microbenchmarks']))['schema']['fields']:
@@ -40,6 +41,8 @@ SANITIZE = {
     'timestamp': str,
 }
 
+# TODO(jtattermusch): add a proper argparse argument rather than trying
+# to emulate one with manual argv inspection.
 if sys.argv[1] == '--schema':
     print(',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns))
     sys.exit(0)
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index e9c3339cf92..821b8c5fb69 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -210,6 +210,11 @@ def expand_json(js, js2=None):
         row.update(bm)
         row.update(parse_name(row['name']))
         row.update(labels)
+        # TODO(jtattermusch): add a comment explaining the point of merging
+        # values of some of the js2 columns into the row. Empirically,
+        # js contains data from the "counters" config and js2 contains
+        # data from the "opt" config, but the point of merging them
+        # really deserves further explanation.
         if js2:
             for bm2 in js2['benchmarks']:
                 if bm['name'] == bm2['name'] and 'already_used' not in bm2:
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 6179f961d55..7249dab6748 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -203,10 +203,24 @@ def run_summary(bm_name, cfg, base_json_name):
 
 
 def collect_summary(bm_name, args):
-    heading('Summary: %s [no counters]' % bm_name)
-    text(run_summary(bm_name, 'opt', bm_name))
-    heading('Summary: %s [with counters]' % bm_name)
-    text(run_summary(bm_name, 'counters', bm_name))
+    # Run the benchmark without counters and add the summary
+    # both to the HTML report and to the console.
+    nocounters_heading = 'Summary: %s [no counters]' % bm_name
+    nocounters_summary = run_summary(bm_name, 'opt', bm_name)
+    heading(nocounters_heading)
+    text(nocounters_summary)
+    print(nocounters_heading)
+    print(nocounters_summary)
+
+    # Run the benchmark with counters and add the summary
+    # both to the HTML report and to the console.
+    counters_heading = 'Summary: %s [with counters]' % bm_name
+    counters_summary = run_summary(bm_name, 'counters', bm_name)
+    heading(counters_heading)
+    text(counters_summary)
+    print(counters_heading)
+    print(counters_summary)
+
    if args.bq_result_table:
        with open('%s.csv' % bm_name, 'w') as f:
            f.write(
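
The two TODOs in bm2bq.py could plausibly be resolved together. Below is a minimal sketch, not the actual follow-up change: it assumes the script keeps its current behavior (print the schema and exit when --schema is passed) and that the dataset name becomes a flag defaulting to the currently hardcoded value. The --dataset_name flag and the stubbed columns list are illustrative, not taken from the repo.

# Hypothetical sketch for the two bm2bq.py TODOs: use argparse instead
# of manual sys.argv inspection, and make the dataset name a flag
# instead of a hardcoded string. Flag names are illustrative only.
import argparse
import sys

argp = argparse.ArgumentParser(
    description='Convert microbenchmark results for BigQuery upload')
argp.add_argument('--schema',
                  action='store_true',
                  help='print the BigQuery table schema and exit')
argp.add_argument('--dataset_name',
                  default='microbenchmarks.microbenchmarks',
                  help='BigQuery <dataset>.<table> to read the schema from')
args = argp.parse_args()

# In the real script, columns is built from the output of
# "bq --format=json show <dataset_name>"; a stub keeps this runnable.
columns = [('name', 'string'), ('cpu_time', 'float')]

if args.schema:
    print(',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns))
    sys.exit(0)

With argparse, --schema no longer has to be the first argument, and --help comes for free.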
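
One observation on the collect_summary() change: the two new blocks differ only in the build config and the report label, so a follow-up could fold them into a loop. The sketch below illustrates that idea; the stubs stand in for the real helpers defined in run_microbenchmark.py, and the args.bq_result_table branch is omitted.

# Sketch of a deduplicated collect_summary(); heading(), text() and
# run_summary() are stubs for the real helpers in run_microbenchmark.py.
def heading(name):
    print('<h1>%s</h1>' % name)  # stub: the real helper appends to the HTML report

def text(txt):
    print('<pre>%s</pre>' % txt)  # stub: the real helper appends to the HTML report

def run_summary(bm_name, cfg, base_json_name):
    return 'summary of %s under the %s config' % (bm_name, cfg)  # stub

def collect_summary(bm_name):
    # one pass per build config: 'opt' has no counters, 'counters' does
    for cfg, label in (('opt', 'no counters'), ('counters', 'with counters')):
        summary_heading = 'Summary: %s [%s]' % (bm_name, label)
        summary = run_summary(bm_name, cfg, bm_name)
        # add the summary both to the HTML report and to the console
        heading(summary_heading)
        text(summary)
        print(summary_heading)
        print(summary)

collect_summary('bm_closure')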