Change jenkins/run_performance.sh to use microbenchmarking

pull/10079/head
Matt Kwong 8 years ago
parent aff1c05ed7
commit d0ee10df9b
Changed files:
  1. tools/jenkins/run_performance.sh (7 lines changed)
  2. tools/run_tests/run_microbenchmark.py (30 lines changed)

@@ -31,7 +31,12 @@
# This script is invoked by Jenkins and runs performance smoke test.
set -ex
# List of benchmarks that provide good signal for analyzing performance changes in pull requests
BENCHMARKS_TO_RUN="bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_metadata"
# Enter the gRPC repo root
cd $(dirname $0)/../..
tools/run_tests/run_performance_tests.py -l c++ node ruby csharp python --netperf --category smoketest
# tools/run_tests/run_performance_tests.py -l c++ node ruby csharp python --netperf --category smoketest
# todo(mattkwong): Change performance test to use microbenchmarking
tools/run_tests/run_microbenchmark.py -c summary --diff_perf origin/$ghprbTargetBranch -b $BENCHMARKS_TO_RUN

@@ -38,6 +38,17 @@ import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
_AVAILABLE_BENCHMARK_TESTS = ['bm_fullstack_unary_ping_pong',
'bm_fullstack_streaming_ping_pong',
'bm_fullstack_streaming_pump',
'bm_closure',
'bm_cq',
'bm_call_create',
'bm_error',
'bm_chttp2_hpack',
'bm_metadata',
'bm_fullstack_trickle']
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
@@ -201,17 +212,8 @@ argp.add_argument('-c', '--collect',
 default=sorted(collectors.keys()),
 help='Which collectors should be run against each benchmark')
 argp.add_argument('-b', '--benchmarks',
-default=['bm_fullstack_unary_ping_pong',
-'bm_fullstack_streaming_ping_pong',
-'bm_fullstack_streaming_pump',
-'bm_closure',
-'bm_cq',
-'bm_call_create',
-'bm_error',
-'bm_chttp2_hpack',
-'bm_metadata',
-'bm_fullstack_trickle',
-],
+choices=_AVAILABLE_BENCHMARK_TESTS,
+default=_AVAILABLE_BENCHMARK_TESTS,
 nargs='+',
 type=str,
 help='Which microbenchmarks should be run')
@@ -229,20 +231,20 @@ argp.add_argument('--summary_time',
type=int,
help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
if args.diff_perf:
git_comment = ''
try:
for collect in args.collect:
for bm_name in args.benchmarks:
collectors[collect](bm_name, args)
if args.diff_perf:
git_comment = 'Performance differences between this PR and %s\\n' % args.diff_perf
if 'summary' not in args.collect:
for bm_name in args.benchmarks:
run_summary(bm_name, 'opt', bm_name)
run_summary(bm_name, 'counters', bm_name)
where_am_i = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', args.diff_perf])
# todo(mattkwong): uncomment this before merging
# subprocess.check_call(['git', 'checkout', args.diff_perf])
comparables = []
subprocess.check_call(['make', 'clean'])
try:

Loading…
Cancel
Save