Merge pull request #8352 from jtattermusch/improve_reporting

More flexible test naming in XML reports
Jan Tattermusch (committed by GitHub)
commit 0fd76ab55f
Changed files:
  1. tools/run_tests/report_utils.py (7 changes)
  2. tools/run_tests/run_tests.py (8 changes)
  3. tools/run_tests/run_tests_matrix.py (9 changes)

--- a/tools/run_tests/report_utils.py
+++ b/tools/run_tests/report_utils.py
@@ -57,11 +57,12 @@ def _filter_msg(msg, output_format):
   return msg


-def render_junit_xml_report(resultset, xml_report):
+def render_junit_xml_report(resultset, xml_report, suite_package='grpc',
+                            suite_name='tests'):
   """Generate JUnit-like XML report."""
   root = ET.Element('testsuites')
-  testsuite = ET.SubElement(root, 'testsuite', id='1', package='grpc',
-                            name='tests')
+  testsuite = ET.SubElement(root, 'testsuite', id='1', package=suite_package,
+                            name=suite_name)
   for shortname, results in resultset.items():
     for result in results:
       xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)

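The effect of the new parameters can be shown with a small, self-contained sketch. This is not the real report_utils code; it only reproduces the testsuites/testsuite/testcase structure visible in the diff, and the resultset contents are hypothetical placeholders.

import xml.etree.ElementTree as ET

def render_report_sketch(resultset, xml_path, suite_package='grpc',
                         suite_name='tests'):
  """Write a minimal JUnit-like report with a configurable suite name."""
  root = ET.Element('testsuites')
  testsuite = ET.SubElement(root, 'testsuite', id='1', package=suite_package,
                            name=suite_name)
  for shortname, results in resultset.items():
    for _ in results:
      ET.SubElement(testsuite, 'testcase', name=shortname)
  ET.ElementTree(root).write(xml_path, encoding='UTF-8', xml_declaration=True)

# Hypothetical resultset; the suite in report_python.xml is now named
# 'python_linux' instead of the previously hard-coded 'tests'.
render_report_sketch({'python.test_lb': [object()]}, 'report_python.xml',
                     suite_name='python_linux')
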
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -1015,6 +1015,8 @@ argp.add_argument('--update_submodules', default=[], nargs='*',
 argp.add_argument('-a', '--antagonists', default=0, type=int)
 argp.add_argument('-x', '--xml_report', default=None, type=str,
                   help='Generates a JUnit-compatible XML report')
+argp.add_argument('--report_suite_name', default='tests', type=str,
+                  help='Test suite name to use in generated JUnit XML report')
 argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                   help='Dont try to iterate over many polling strategies when they exist')
 args = argp.parse_args()
@@ -1327,7 +1329,8 @@ def _build_and_run(

   if build_only:
     if xml_report:
-      report_utils.render_junit_xml_report(resultset, xml_report)
+      report_utils.render_junit_xml_report(resultset, xml_report,
+                                           suite_name=args.report_suite_name)
     return []

   # start antagonists
@@ -1379,7 +1382,8 @@ def _build_and_run(
     for antagonist in antagonists:
       antagonist.kill()
     if xml_report and resultset:
-      report_utils.render_junit_xml_report(resultset, xml_report)
+      report_utils.render_junit_xml_report(resultset, xml_report,
+                                           suite_name=args.report_suite_name)

   number_failures, _ = jobset.run(
       post_tests_steps, maxjobs=1, stop_on_failure=True,

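As a standalone illustration of how the new flag travels from the command line into the report call (all names here are assumed stand-ins; the real jobset plumbing and report_utils module are omitted), the sketch below parses --report_suite_name and forwards it only when an XML report was requested:

import argparse

def render_junit_xml_report_stub(resultset, xml_report, suite_name='tests'):
  # Stand-in for report_utils.render_junit_xml_report.
  print('writing %s with <testsuite name=%r>' % (xml_report, suite_name))

argp = argparse.ArgumentParser()
argp.add_argument('-x', '--xml_report', default=None, type=str)
argp.add_argument('--report_suite_name', default='tests', type=str)
args = argp.parse_args(['-x', 'report.xml', '--report_suite_name', 'csharp_linux'])

resultset = {'csharp.SomeTest': []}  # hypothetical results
if args.xml_report and resultset:
  render_junit_xml_report_stub(resultset, args.xml_report,
                               suite_name=args.report_suite_name)
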
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -55,7 +55,8 @@ def _docker_jobspec(name, runtests_args=[]):
                    '--use_docker',
                    '-t',
                    '-j', str(_INNER_JOBS),
-                   '-x', 'report_%s.xml' % name] + runtests_args,
+                   '-x', 'report_%s.xml' % name,
+                   '--report_suite_name', '%s' % name] + runtests_args,
           shortname='run_tests_%s' % name,
           timeout_seconds=_RUNTESTS_TIMEOUT)
   return test_job
@@ -70,7 +71,8 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None):
           cmdline=['tools/run_tests/run_tests_in_workspace.sh',
                    '-t',
                    '-j', str(_INNER_JOBS),
-                   '-x', '../report_%s.xml' % name] + runtests_args,
+                   '-x', '../report_%s.xml' % name,
+                   '--report_suite_name', '%s' % name] + runtests_args,
           environ=env,
           shortname='run_tests_%s' % name,
           timeout_seconds=_RUNTESTS_TIMEOUT)
@@ -271,7 +273,8 @@ num_failures, resultset = jobset.run(jobs,
                                      newline_on_success=True,
                                      travis=True,
                                      maxjobs=args.jobs)
-report_utils.render_junit_xml_report(resultset, 'report.xml')
+report_utils.render_junit_xml_report(resultset, 'report.xml',
+                                     suite_name='aggregate_tests')

 if num_failures == 0:
   jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',

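For run_tests_matrix.py the change simply appends the new flag to each per-language invocation, so every inner run writes report_<name>.xml with a matching suite name while the aggregate report.xml uses 'aggregate_tests'. A hedged sketch of that command-line construction follows; the leading 'python tools/run_tests/run_tests.py' entries and the value of _INNER_JOBS are assumptions, not part of this diff, and the jobset.JobSpec wrapper is omitted.

# Hypothetical sketch of the docker jobspec command line.
_INNER_JOBS = 2  # assumed value

def docker_cmdline_sketch(name, runtests_args=None):
  runtests_args = runtests_args or []
  return (['python', 'tools/run_tests/run_tests.py',  # assumed leading entries
           '--use_docker',
           '-t',
           '-j', str(_INNER_JOBS),
           '-x', 'report_%s.xml' % name,
           '--report_suite_name', '%s' % name] + runtests_args)

# Each matrix entry now produces a per-language report whose suite name
# matches the job name.
print(docker_cmdline_sketch('python_linux', ['-l', 'python']))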