Merge pull request #20206 from jtattermusch/sponge_targets_per_suite

Generate separate sponge target for each test suite for more readable test results.
Jan Tattermusch (committed via GitHub)
commit 1bfdbc1f6c
Changed files (3):

  1. tools/run_tests/python_utils/report_utils.py (26 lines changed)
  2. tools/run_tests/run_tests.py (12 lines changed)
  3. tools/run_tests/run_tests_matrix.py (61 lines changed)

tools/run_tests/python_utils/report_utils.py

@@ -49,12 +49,28 @@ def render_junit_xml_report(resultset,
                             report_file,
                             suite_package='grpc',
                             suite_name='tests',
-                            replace_dots=True):
+                            replace_dots=True,
+                            multi_target=False):
     """Generate JUnit-like XML report."""
-    tree = new_junit_xml_tree()
-    append_junit_xml_results(tree, resultset, suite_package, suite_name, '1',
-                             replace_dots)
-    create_xml_report_file(tree, report_file)
+    if not multi_target:
+        tree = new_junit_xml_tree()
+        append_junit_xml_results(tree, resultset, suite_package, suite_name,
+                                 '1', replace_dots)
+        create_xml_report_file(tree, report_file)
+    else:
+        # To have each test result displayed as a separate target by the Resultstore/Sponge UI,
+        # we generate a separate XML report file for each test result
+        for shortname, results in six.iteritems(resultset):
+            one_result = {shortname: results}
+            tree = new_junit_xml_tree()
+            append_junit_xml_results(tree, one_result,
+                                     '%s_%s' % (suite_package, shortname),
+                                     '%s_%s' % (suite_name, shortname), '1',
+                                     replace_dots)
+            per_suite_report_file = os.path.join(
+                os.path.dirname(report_file), shortname,
+                os.path.basename(report_file))
+            create_xml_report_file(tree, per_suite_report_file)


 def create_xml_report_file(tree, report_file):
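As a quick illustration of the multi_target branch above, here is a minimal standalone sketch of the per-result path derivation; the report_file value and shortnames are invented examples, not taken from this PR.

import os

# Hypothetical inputs: one aggregate report location plus two job shortnames.
report_file = 'reports/toplevel_run_tests_invocations/sponge_log.xml'
shortnames = ['run_tests_python_linux_opt', 'run_tests_csharp_linux_dbg']

for shortname in shortnames:
    # Same derivation as the multi_target branch: keep the directory and file
    # name of the aggregate report and insert the shortname in between.
    per_suite_report_file = os.path.join(
        os.path.dirname(report_file), shortname,
        os.path.basename(report_file))
    print(per_suite_report_file)
# reports/toplevel_run_tests_invocations/run_tests_python_linux_opt/sponge_log.xml
# reports/toplevel_run_tests_invocations/run_tests_csharp_linux_dbg/sponge_log.xml

Each resulting sponge_log.xml is then picked up as its own target by the Resultstore/Sponge UI, which is the point of the change.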

tools/run_tests/run_tests.py

@@ -1472,6 +1472,13 @@ argp.add_argument(
     default='tests',
     type=str,
     help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+    '--report_multi_target',
+    default=False,
+    const=True,
+    action='store_const',
+    help='Generate separate XML report for each test job (Looks better in UIs).'
+)
 argp.add_argument(
     '--quiet_success',
     default=False,
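For reference, a self-contained sketch of the store_const pattern the new flag uses; the parser below is illustrative and only the --report_multi_target argument itself comes from this PR.

import argparse

argp = argparse.ArgumentParser()
argp.add_argument(
    '--report_multi_target',
    default=False,
    const=True,
    action='store_const',
    help='Generate separate XML report for each test job.')

# Without the flag the attribute keeps its default; passing the flag stores the const.
print(argp.parse_args([]).report_multi_target)                          # False
print(argp.parse_args(['--report_multi_target']).report_multi_target)   # True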
@@ -1880,7 +1887,10 @@ def _build_and_run(check_cancelled,
                                  upload_extra_fields)
         if xml_report and resultset:
             report_utils.render_junit_xml_report(
-                resultset, xml_report, suite_name=args.report_suite_name)
+                resultset,
+                xml_report,
+                suite_name=args.report_suite_name,
+                multi_target=args.report_multi_target)

     number_failures, _ = jobset.run(
         post_tests_steps,
tools/run_tests/run_tests_matrix.py

@@ -43,6 +43,11 @@ _OBJC_RUNTESTS_TIMEOUT = 90 * 60
 # Number of jobs assigned to each run_tests.py instance
 _DEFAULT_INNER_JOBS = 2

+# Name of the top-level umbrella report that includes all the run_tests.py invocations
+# Note that the starting letter 't' matters so that the targets are listed AFTER
+# the per-test breakdown items that start with 'run_tests/' (it is more readable that way)
+_MATRIX_REPORT_NAME = 'toplevel_run_tests_invocations'
+

 def _safe_report_name(name):
     """Reports with '+' in target name won't show correctly in ResultStore"""
@@ -55,11 +60,16 @@ def _report_filename(name):
     return '%s/%s' % (_safe_report_name(name), 'sponge_log.xml')


-def _report_logfilename(name):
-    """Generates log file name that corresponds to name generated by _report_filename"""
+def _matrix_job_logfilename(shortname_for_multi_target):
+    """Generate location for log file that will match the sponge_log.xml from the top-level matrix report."""
     # 'sponge_log.log' suffix must be there for log to get recognized as "target log"
     # for the corresponding 'sponge_log.xml' report.
-    return '%s/%s' % (_safe_report_name(name), 'sponge_log.log')
+    # the shortname_for_multi_target component must be set to match the sponge_log.xml location
+    # because the top-level render_junit_xml_report is called with multi_target=True
+    s = '%s/%s/%s' % (_MATRIX_REPORT_NAME, shortname_for_multi_target,
+                      'sponge_log.log')
+    print(s)
+    return s


 def _docker_jobspec(name,
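A minimal sketch of why the log location must embed the shortname: with multi_target=True the matrix-level XML for a job lands under the matrix report directory plus the job shortname, and the sponge_log.log has to sit next to it to be picked up as the target log. The shortname below is invented, and the aggregate report path is assumed to be '<_MATRIX_REPORT_NAME>/sponge_log.xml'.

import os

_MATRIX_REPORT_NAME = 'toplevel_run_tests_invocations'
shortname = 'run_tests_python_linux_opt'  # invented example

# Per-target XML written by render_junit_xml_report(..., multi_target=True):
xml_path = os.path.join(_MATRIX_REPORT_NAME, shortname, 'sponge_log.xml')
# Log location produced by _matrix_job_logfilename(shortname):
log_path = '%s/%s/%s' % (_MATRIX_REPORT_NAME, shortname, 'sponge_log.log')

print(xml_path)  # toplevel_run_tests_invocations/run_tests_python_linux_opt/sponge_log.xml
print(log_path)  # toplevel_run_tests_invocations/run_tests_python_linux_opt/sponge_log.log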
@@ -70,18 +80,19 @@ def _docker_jobspec(name,
     """Run a single instance of run_tests.py in a docker container"""
     if not timeout_seconds:
         timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    shortname = 'run_tests_%s' % name
     test_job = jobset.JobSpec(
         cmdline=[
             'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
             '-j',
             str(inner_jobs), '-x',
-            _report_filename(name), '--report_suite_name',
+            'run_tests/%s' % _report_filename(name), '--report_suite_name',
            '%s' % _safe_report_name(name)
         ] + runtests_args,
         environ=runtests_envs,
-        shortname='run_tests_%s' % name,
+        shortname=shortname,
         timeout_seconds=timeout_seconds,
-        logfilename=_report_logfilename(name))
+        logfilename=_matrix_job_logfilename(shortname))
     return test_job
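Two details above are easy to miss: the inner run_tests.py report now nests under a 'run_tests/' prefix, and (per the comment on _MATRIX_REPORT_NAME) the umbrella report's leading 't' makes it sort after those entries. A small sketch with an invented job name:

# _report_filename('python_linux_opt') yields 'python_linux_opt/sponge_log.xml'
# (invented job name); the new -x argument nests it under 'run_tests/':
inner_report = 'run_tests/%s' % 'python_linux_opt/sponge_log.xml'
print(inner_report)  # run_tests/python_linux_opt/sponge_log.xml

# In a lexicographically sorted target list, 'run_tests/...' entries come
# before the umbrella 'toplevel_run_tests_invocations' report ('r' < 't').
print(sorted([inner_report, 'toplevel_run_tests_invocations']))
# ['run_tests/python_linux_opt/sponge_log.xml', 'toplevel_run_tests_invocations']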
@@ -96,6 +107,7 @@ def _workspace_jobspec(name,
     workspace_name = 'workspace_%s' % name
     if not timeout_seconds:
         timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    shortname = 'run_tests_%s' % name
     env = {'WORKSPACE_NAME': workspace_name}
     env.update(runtests_envs)
     test_job = jobset.JobSpec(
@@ -103,13 +115,13 @@ def _workspace_jobspec(name,
             'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
             '-t', '-j',
             str(inner_jobs), '-x',
-            '../%s' % _report_filename(name), '--report_suite_name',
+            '../run_tests/%s' % _report_filename(name), '--report_suite_name',
             '%s' % _safe_report_name(name)
         ] + runtests_args,
         environ=env,
-        shortname='run_tests_%s' % name,
+        shortname=shortname,
         timeout_seconds=timeout_seconds,
-        logfilename=_report_logfilename(name))
+        logfilename=_matrix_job_logfilename(shortname))
     return test_job
@@ -175,7 +187,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux'],
         labels=['basictests'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     # supported on linux only
@@ -184,7 +196,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux'],
         labels=['basictests', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     # supported on all platforms.
@@ -193,7 +205,8 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux', 'macos', 'windows'],
         labels=['basictests', 'corelang'],
-        extra_args=extra_args,
+        extra_args=
+        extra_args,  # don't use multi_target report because C has too many test cases
         inner_jobs=inner_jobs,
         timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
@@ -203,7 +216,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux', 'macos', 'windows'],
         labels=['basictests', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)
     # C# tests on .NET core
     test_jobs += _generate_jobs(
@@ -213,7 +226,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         arch='default',
         compiler='coreclr',
         labels=['basictests', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     test_jobs += _generate_jobs(
@@ -222,7 +235,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         platforms=['linux', 'macos', 'windows'],
         iomgr_platforms=['native', 'gevent'],
         labels=['basictests', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     # supported on linux and mac.
@@ -231,7 +244,8 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux', 'macos'],
         labels=['basictests', 'corelang'],
-        extra_args=extra_args,
+        extra_args=
+        extra_args,  # don't use multi_target report because C++ has too many test cases
         inner_jobs=inner_jobs,
         timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
@@ -240,7 +254,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
         configs=['dbg', 'opt'],
         platforms=['linux', 'macos'],
         labels=['basictests', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     # supported on mac only.
# supported on mac only.
@ -249,7 +263,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
configs=['opt'],
platforms=['macos'],
labels=['basictests', 'multilang'],
extra_args=extra_args,
extra_args=extra_args + ['--report_multi_target'],
inner_jobs=inner_jobs,
timeout_seconds=_OBJC_RUNTESTS_TIMEOUT)
@@ -400,7 +414,7 @@ def _create_portability_test_jobs(extra_args=[],
         arch='default',
         compiler='python_alpine',
         labels=['portability', 'multilang'],
-        extra_args=extra_args,
+        extra_args=extra_args + ['--report_multi_target'],
         inner_jobs=inner_jobs)

     # TODO(jtattermusch): a large portion of the libuv tests is failing,
@@ -604,17 +618,18 @@ if __name__ == "__main__":
         resultset.update(skipped_results)
     report_utils.render_junit_xml_report(
         resultset,
-        _report_filename('aggregate_tests'),
-        suite_name='aggregate_tests')
+        _report_filename(_MATRIX_REPORT_NAME),
+        suite_name=_MATRIX_REPORT_NAME,
+        multi_target=True)

     if num_failures == 0:
         jobset.message(
             'SUCCESS',
-            'All run_tests.py instance finished successfully.',
+            'All run_tests.py instances finished successfully.',
             do_newline=True)
     else:
         jobset.message(
             'FAILED',
-            'Some run_tests.py instance have failed.',
+            'Some run_tests.py instances have failed.',
             do_newline=True)
         sys.exit(1)
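Putting it together at the top level: the matrix resultset is keyed by the shortnames assigned in _docker_jobspec/_workspace_jobspec, so rendering it with multi_target=True yields one target per run_tests.py invocation. A schematic sketch (shortnames and result placeholders are invented):

# Schematic only: resultset maps each matrix job's shortname to its job results.
resultset = {
    'run_tests_python_linux_opt': ['<list of JobResult objects>'],
    'run_tests_csharp_linux_dbg': ['<list of JobResult objects>'],
}

# With multi_target=True, render_junit_xml_report writes one XML file per key,
# and each matrix job's log (see _matrix_job_logfilename) sits next to its XML.
for shortname in resultset:
    print('toplevel_run_tests_invocations/%s/sponge_log.xml' % shortname)
# toplevel_run_tests_invocations/run_tests_python_linux_opt/sponge_log.xml
# toplevel_run_tests_invocations/run_tests_csharp_linux_dbg/sponge_log.xml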
