Merge pull request #13028 from ctiller/tfix

Working benchmark parallelization
reviewable/pr12644/r6^2
Craig Tiller 7 years ago committed by GitHub
commit 457c68476d
  1. 14
      build.yaml
  2. 1
      templates/tools/run_tests/generated/tests.json.template
  3. 202
      tools/run_tests/generated/tests.json
  4. 30
      tools/run_tests/run_tests.py

@ -3555,6 +3555,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3577,6 +3578,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3599,6 +3601,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3621,6 +3624,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3642,6 +3646,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3663,6 +3668,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3684,6 +3690,7 @@ targets:
- gpr
args:
- --benchmark_min_time=4
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3705,6 +3712,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3729,6 +3737,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
excluded_poll_engines:
- poll
@ -3756,6 +3765,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
excluded_poll_engines:
- poll
@ -3782,6 +3792,7 @@ targets:
- grpc++_test_config
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
excluded_poll_engines:
- poll
@ -3809,6 +3820,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
excluded_poll_engines:
- poll
@ -3834,6 +3846,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac
@ -3856,6 +3869,7 @@ targets:
- gpr
args:
- --benchmark_min_time=0
benchmark: true
defaults: benchmark
platforms:
- mac

@ -9,6 +9,7 @@
"platforms": tgt.platforms,
"ci_platforms": tgt.ci_platforms,
"gtest": tgt.gtest,
"benchmark": tgt.get("benchmark", False),
"exclude_configs": tgt.get("exclude_configs", []),
"exclude_iomgrs": tgt.get("exclude_iomgrs", []),
"args": tgt.get("args", []),

File diff suppressed because it is too large Load Diff

@ -149,10 +149,8 @@ class Config(object):
for k, v in environ.items():
actual_environ[k] = v
if not flaky and shortname and shortname in flaky_tests:
print('Setting %s to flaky' % shortname)
flaky = True
if shortname in shortname_to_cpu:
print('Update CPU cost for %s: %f -> %f' % (shortname, cpu_cost, shortname_to_cpu[shortname]))
cpu_cost = shortname_to_cpu[shortname]
return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
shortname=shortname,
@ -332,11 +330,29 @@ class CLanguage(object):
if cpu_cost == 'capacity':
cpu_cost = multiprocessing.cpu_count()
if os.path.isfile(binary):
if 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a
# complete list of the tests contained in a binary
# for each test, we then add a job to run, filtering for just that
# test
list_test_command = None
filter_test_command = None
# these are the flags defined by the gtest and benchmark frameworks to
# list and filter test runs. We use them to split each individual test
# into its own JobSpec, and thus into its own process.
if 'benchmark' in target and target['benchmark']:
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--benchmark_list_tests'],
stderr=fnull)
base = None
for line in tests.split('\n'):
test = line.strip()
cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
out.append(self.config.job_spec(cmdline,
shortname='%s:%s %s' % (binary, test, shortname_ext),
cpu_cost=cpu_cost,
timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
environ=env))
elif 'gtest' in target and target['gtest']:
# here we parse the output of --gtest_list_tests to build up a complete
# list of the tests contained in a binary; for each test, we then
# add a job to run, filtering for just that test.
with open(os.devnull, 'w') as fnull:
tests = subprocess.check_output([binary, '--gtest_list_tests'],
stderr=fnull)

Loading…
Cancel
Save