diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 4103eb0534b..d521aa6a9d9 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -35,6 +35,7 @@ import subprocess
 import tempfile
 import sys
 import time
+import jobset

 def start_port_server(port_server_port):
     # check if a compatible port server is running
@@ -69,7 +70,7 @@ def start_port_server(port_server_port):
                 '-p', '%d' % port_server_port, '-l', logfile]
         env = dict(os.environ)
         env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
-        if platform_string() == 'windows':
+        if jobset.platform_string() == 'windows':
             # Working directory of port server needs to be outside of Jenkins
             # workspace to prevent file lock issues.
             tempdir = tempfile.mkdtemp()
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index 42a31a622f7..d51388bbf05 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -74,6 +74,7 @@ def link(txt, tgt):

 benchmarks = []
 profile_analysis = []
+cleanup = []

 for bm_name in sys.argv[1:]:
   # generate latency profiles
@@ -92,10 +93,20 @@ for bm_name in sys.argv[1:]:
           'tools/profiling/latency_profile/profile_analyzer.py',
           '--source', '%s.trace' % fnize(line),
           '--fmt', 'simple',
           '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=None))
-
-  jobset.run(benchmarks, maxjobs=multiprocessing.cpu_count()/2,
-             add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
-  jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+      cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+      # periodically flush out the list of jobs: profile_analysis jobs at least
+      # consume upwards of five gigabytes of ram in some cases, and so analysing
+      # hundreds of them at once is impractical -- but we want at least some
+      # concurrency or the work takes too long
+      if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
+        # run up to half the cpu count: each benchmark can use up to two cores
+        # (one for the microbenchmark, one for the data flush)
+        jobset.run(benchmarks, maxjobs=multiprocessing.cpu_count()/2,
+                   add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+        # reset the batch lists so flushed jobs are not re-run next flush
+        benchmarks = []
+        profile_analysis = []
+        cleanup = []
+  # run the remaining benchmarks that weren't flushed
+  if len(benchmarks):
+    jobset.run(benchmarks, maxjobs=multiprocessing.cpu_count()/2,
+               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
+    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())

   # generate flamegraphs
   heading('Flamegraphs: %s' % bm_name)