Merge pull request #7018 from dcbaker/junit

mtest: Generate JUnit results for tests
Committed 5 years ago by Jussi Pakkanen via GitHub (commit 34e7e8780c)
8 changed files (lines changed in parentheses):

  1. ci/ciimage/arch/install.sh (2)
  2. ci/ciimage/eoan/install.sh (1)
  3. ci/ciimage/fedora/install.sh (2)
  4. ci/ciimage/opensuse/install.sh (4)
  5. data/schema.xsd (96)
  6. docs/markdown/snippets/junit_result_generation.md (4)
  7. mesonbuild/mtest.py (174)
  8. run_unittests.py (25)

ci/ciimage/arch/install.sh
@@ -12,7 +12,7 @@ pkgs=(
 itstool gtk3 java-environment=8 gtk-doc llvm clang sdl2 graphviz
 doxygen vulkan-validation-layers openssh mercurial gtk-sharp-2 qt5-tools
 libwmf valgrind cmake netcdf-fortran openmpi nasm gnustep-base gettext
-python-jsonschema
+python-jsonschema python-lxml
 # cuda
 )

ci/ciimage/eoan/install.sh
@@ -11,6 +11,7 @@ export DC=gdc
 pkgs=(
 python3-pytest-xdist
 python3-pip libxml2-dev libxslt1-dev libyaml-dev libjson-glib-dev
+python3-lxml
 wget unzip
 qt5-default clang
 pkg-config-arm-linux-gnueabihf

ci/ciimage/fedora/install.sh
@@ -13,7 +13,7 @@ pkgs=(
 doxygen vulkan-devel vulkan-validation-layers-devel openssh mercurial gtk-sharp2-devel libpcap-devel gpgme-devel
 qt5-qtbase-devel qt5-qttools-devel qt5-linguist qt5-qtbase-private-devel
 libwmf-devel valgrind cmake openmpi-devel nasm gnustep-base-devel gettext-devel ncurses-devel
-libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
+libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel python3-lxml
 )
 # Sys update

ci/ciimage/opensuse/install.sh
@@ -5,7 +5,7 @@ set -e
 source /ci/common.sh
 pkgs=(
-python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3
+python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3 python3-lxml
 ninja make git autoconf automake patch python3-Cython python3-jsonschema
 elfutils gcc gcc-c++ gcc-fortran gcc-objc gcc-obj-c++ vala rust bison flex curl
 mono-core gtkmm3-devel gtest gmock protobuf-devel wxGTK3-3_2-devel gobject-introspection-devel
@@ -17,7 +17,7 @@ pkgs=(
 libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
 boost-devel libboost_date_time-devel libboost_filesystem-devel libboost_locale-devel libboost_system-devel
 libboost_test-devel libboost_log-devel libboost_regex-devel
-libboost_python-devel libboost_python-py3-1_71_0-devel libboost_regex-devel
+libboost_python-py3-1_71_0-devel libboost_regex-devel
 )
 # Sys update

data/schema.xsd
@@ -0,0 +1,96 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- from https://svn.jenkins-ci.org/trunk/hudson/dtkit/dtkit-format/dtkit-junit-model/src/main/resources/com/thalesgroup/dtkit/junit/model/xsd/junit-4.xsd -->
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="failure">
<xs:complexType mixed="true">
<xs:attribute name="type" type="xs:string" use="optional"/>
<xs:attribute name="message" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="error">
<xs:complexType mixed="true">
<xs:attribute name="type" type="xs:string" use="optional"/>
<xs:attribute name="message" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="properties">
<xs:complexType>
<xs:sequence>
<xs:element ref="property" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="property">
<xs:complexType>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="value" type="xs:string" use="required"/>
</xs:complexType>
</xs:element>
<xs:element name="skipped">
<xs:complexType mixed="true">
<xs:attribute name="message" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="system-err" type="xs:string"/>
<xs:element name="system-out" type="xs:string"/>
<xs:element name="testcase">
<xs:complexType>
<xs:sequence>
<xs:element ref="skipped" minOccurs="0" maxOccurs="1"/>
<xs:element ref="error" minOccurs="0" maxOccurs="unbounded"/>
<xs:element ref="failure" minOccurs="0" maxOccurs="unbounded"/>
<xs:element ref="system-out" minOccurs="0" maxOccurs="unbounded"/>
<xs:element ref="system-err" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="assertions" type="xs:string" use="optional"/>
<xs:attribute name="time" type="xs:string" use="optional"/>
<xs:attribute name="classname" type="xs:string" use="optional"/>
<xs:attribute name="status" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="testsuite">
<xs:complexType>
<xs:sequence>
<xs:element ref="properties" minOccurs="0" maxOccurs="1"/>
<xs:element ref="testcase" minOccurs="0" maxOccurs="unbounded"/>
<xs:element ref="system-out" minOccurs="0" maxOccurs="1"/>
<xs:element ref="system-err" minOccurs="0" maxOccurs="1"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="tests" type="xs:string" use="required"/>
<xs:attribute name="failures" type="xs:string" use="optional"/>
<xs:attribute name="errors" type="xs:string" use="optional"/>
<xs:attribute name="time" type="xs:string" use="optional"/>
<xs:attribute name="disabled" type="xs:string" use="optional"/>
<xs:attribute name="skipped" type="xs:string" use="optional"/>
<xs:attribute name="timestamp" type="xs:string" use="optional"/>
<xs:attribute name="hostname" type="xs:string" use="optional"/>
<xs:attribute name="id" type="xs:string" use="optional"/>
<xs:attribute name="package" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
<xs:element name="testsuites">
<xs:complexType>
<xs:sequence>
<xs:element ref="testsuite" minOccurs="0" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="name" type="xs:string" use="optional"/>
<xs:attribute name="time" type="xs:string" use="optional"/>
<xs:attribute name="tests" type="xs:string" use="optional"/>
<xs:attribute name="failures" type="xs:string" use="optional"/>
<xs:attribute name="disabled" type="xs:string" use="optional"/>
<xs:attribute name="errors" type="xs:string" use="optional"/>
</xs:complexType>
</xs:element>
</xs:schema>
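
The schema above is what the new unit test validates generated files against. As an informal illustration (not part of the commit), one way to check a result file against this schema with lxml is shown below; the two file paths are assumptions made for the example.

```python
# Sketch only: validate a generated JUnit file against data/schema.xsd.
# Both paths below are example assumptions, not values taken from the patch.
import lxml.etree as et

schema = et.XMLSchema(et.parse('data/schema.xsd'))
junit = et.parse('builddir/meson-logs/testlog.junit.xml')

try:
    schema.assertValid(junit)          # raises DocumentInvalid on schema violations
    print('testlog.junit.xml conforms to the schema')
except et.DocumentInvalid as e:
    print(e.error_log)
```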

docs/markdown/snippets/junit_result_generation.md
@@ -0,0 +1,4 @@
## Meson test now produces JUnit xml from results

Meson will now generate a JUnit-compatible XML file from test results. It
is written to the meson-logs directory as testlog.junit.xml.
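
Because the output follows the schema added in data/schema.xsd, it can be consumed by any JUnit-aware tool or with the standard library. A rough sketch follows; the build directory path is an assumption, not something the snippet specifies.

```python
# Sketch: summarize meson-logs/testlog.junit.xml with the standard library.
import xml.etree.ElementTree as ET

root = ET.parse('builddir/meson-logs/testlog.junit.xml').getroot()  # <testsuites>
for suite in root.findall('testsuite'):
    print('{}: {} tests, {} failures, {} errors'.format(
        suite.get('name'), suite.get('tests'),
        suite.get('failures', '0'), suite.get('errors', '0')))
    for case in suite.findall('testcase'):
        # A failed or errored case carries a <failure> or <error> child element.
        if case.find('failure') is not None or case.find('error') is not None:
            print('  problem in: ' + case.get('name'))
```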

mesonbuild/mtest.py
@@ -33,8 +33,10 @@ import signal
 import subprocess
 import sys
 import tempfile
+import textwrap
 import time
 import typing as T
+import xml.etree.ElementTree as et

 from . import build
 from . import environment
@@ -320,6 +322,110 @@ class TAPParser:
         yield self.Error('Too many tests run (expected {}, got {})'.format(plan.count, num_tests))


+class JunitBuilder:
+
+    """Builder for Junit test results.
+
+    Junit is impossible to stream out, it requires attributes counting the
+    total number of tests, failures, skips, and errors in the root element
+    and in each test suite. As such, we use a builder class to track each
+    test case, and calculate all metadata before writing it out.
+
+    For tests with multiple results (like from a TAP test), we record the
+    test as a suite with the project_name.test_name. This allows us to track
+    each result separately. For tests with only one result (such as exit-code
+    tests) we record each one into a suite with the name project_name. The use
+    of the project_name allows us to sort subproject tests separately from
+    the root project.
+    """
+
+    def __init__(self, filename: str) -> None:
+        self.filename = filename
+        self.root = et.Element(
+            'testsuites', tests='0', errors='0', failures='0')
+        self.suites = {}  # type: T.Dict[str, et.Element]
+
+    def log(self, name: str, test: 'TestRun') -> None:
+        """Log a single test case."""
+        # In this case we have a test binary with multiple results.
+        # We want to record this so that each result is recorded
+        # separately
+        if test.results:
+            suitename = '{}.{}'.format(test.project, name)
+            assert suitename not in self.suites, 'duplicate suite'
+
+            suite = self.suites[suitename] = et.Element(
+                'testsuite',
+                name=suitename,
+                tests=str(len(test.results)),
+                errors=str(sum(1 for r in test.results if r is TestResult.ERROR)),
+                failures=str(sum(1 for r in test.results if r in
+                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
+                skipped=str(sum(1 for r in test.results if r is TestResult.SKIP)),
+            )
+
+            for i, result in enumerate(test.results):
+                # Both name and classname are required. Set them both to the
+                # number of the test in a TAP test, as TAP doesn't give names.
+                testcase = et.SubElement(suite, 'testcase', name=str(i), classname=str(i))
+                if result is TestResult.SKIP:
+                    et.SubElement(testcase, 'skipped')
+                elif result is TestResult.ERROR:
+                    et.SubElement(testcase, 'error')
+                elif result is TestResult.FAIL:
+                    et.SubElement(testcase, 'failure')
+                elif result is TestResult.UNEXPECTEDPASS:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test unexpectedly passed.'
+                elif result is TestResult.TIMEOUT:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test did not finish before configured timeout.'
+            if test.stdo:
+                out = et.SubElement(suite, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(suite, 'system-err')
+                err.text = test.stde.rstrip()
+        else:
+            if test.project not in self.suites:
+                suite = self.suites[test.project] = et.Element(
+                    'testsuite', name=test.project, tests='1', errors='0',
+                    failures='0', skipped='0')
+            else:
+                suite = self.suites[test.project]
+                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)
+
+            testcase = et.SubElement(suite, 'testcase', name=name, classname=name)
+            if test.res is TestResult.SKIP:
+                et.SubElement(testcase, 'skipped')
+                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
+            elif test.res is TestResult.ERROR:
+                et.SubElement(testcase, 'error')
+                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
+            elif test.res is TestResult.FAIL:
+                et.SubElement(testcase, 'failure')
+                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
+            if test.stdo:
+                out = et.SubElement(testcase, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(testcase, 'system-err')
+                err.text = test.stde.rstrip()
+
+    def write(self) -> None:
+        """Calculate total test counts and write out the xml result."""
+        for suite in self.suites.values():
+            self.root.append(suite)
+            # Skipped is really not allowed in the "testsuites" element
+            for attr in ['tests', 'errors', 'failures']:
+                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
+        tree = et.ElementTree(self.root)
+        with open(self.filename, 'wb') as f:
+            tree.write(f, encoding='utf-8', xml_declaration=True)
+
+
 class TestRun:

     @classmethod
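
For orientation, the TestHarness hunks further down wire this builder in roughly as follows. This is a paraphrased sketch rather than a literal excerpt of the patch, and the loop over finished tests is hypothetical.

```python
# Paraphrased usage sketch based on the TestHarness hunks below (not the literal patch).
junit = JunitBuilder(logfile_base + '.junit.xml')   # created alongside testlog.txt / testlog.json

for name, test_run in finished_tests:                # hypothetical iteration over completed TestRun objects
    junit.log(name, test_run)                        # one testcase (or one suite, for TAP) per result

junit.write()                                        # totals are computed and the XML written once, at summary time
```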
@@ -335,30 +441,29 @@ class TestRun:
             res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
         else:
             res = TestResult.FAIL if bool(returncode) else TestResult.OK
-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd)

     @classmethod
     def make_tap(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                  returncode: int, starttime: float, duration: float,
                  stdo: str, stde: str,
                  cmd: T.Optional[T.List[str]]) -> 'TestRun':
-        res = None
-        num_tests = 0
+        res = None     # T.Optional[TestResult]
+        results = []   # T.List[TestResult]
         failed = False
-        num_skipped = 0

         for i in TAPParser(io.StringIO(stdo)).parse():
             if isinstance(i, TAPParser.Bailout):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
+                failed = True
             elif isinstance(i, TAPParser.Test):
-                if i.result == TestResult.SKIP:
-                    num_skipped += 1
-                elif i.result in (TestResult.FAIL, TestResult.UNEXPECTEDPASS):
+                results.append(i.result)
+                if i.result not in {TestResult.OK, TestResult.EXPECTEDFAIL}:
                     failed = True
-                num_tests += 1
             elif isinstance(i, TAPParser.Error):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
                 stde += '\nTAP parsing error: ' + i.message
+                failed = True

         if returncode != 0:
             res = TestResult.ERROR
@@ -366,7 +471,7 @@ class TestRun:
         if res is None:
             # Now determine the overall result of the test based on the outcome of the subcases
-            if num_skipped == num_tests:
+            if all(t is TestResult.SKIP for t in results):
                 # This includes the case where num_tests is zero
                 res = TestResult.SKIP
             elif test.should_fail:
@@ -374,14 +479,16 @@ class TestRun:
             else:
                 res = TestResult.FAIL if failed else TestResult.OK

-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, results, returncode, starttime, duration, stdo, stde, cmd)

     def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
-                 res: TestResult, returncode: int, starttime: float, duration: float,
+                 res: TestResult, results: T.List[TestResult], returncode:
+                 int, starttime: float, duration: float,
                  stdo: T.Optional[str], stde: T.Optional[str],
                  cmd: T.Optional[T.List[str]]):
         assert isinstance(res, TestResult)
         self.res = res
+        self.results = results  # May be an empty list
         self.returncode = returncode
         self.starttime = starttime
         self.duration = duration
@@ -390,6 +497,7 @@ class TestRun:
         self.cmd = cmd
         self.env = test_env
         self.should_fail = test.should_fail
+        self.project = test.project_name

     def get_log(self) -> str:
         res = '--- command ---\n'
@@ -490,7 +598,7 @@ class SingleTestRunner:
         cmd = self._get_cmd()
         if cmd is None:
             skip_stdout = 'Not run because can not execute cross compiled binaries.'
-            return TestRun(self.test, self.test_env, TestResult.SKIP, GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
+            return TestRun(self.test, self.test_env, TestResult.SKIP, [], GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
         else:
             wrap = TestHarness.get_wrapper(self.options)
             if self.options.gdb:
@@ -633,7 +741,7 @@ class SingleTestRunner:
                 stdo = ""
                 stde = additional_error
             if timed_out:
-                return TestRun(self.test, self.test_env, TestResult.TIMEOUT, p.returncode, starttime, duration, stdo, stde, cmd)
+                return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)
             else:
                 if self.test.protocol == 'exitcode':
                     return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
@@ -655,9 +763,11 @@ class TestHarness:
         self.timeout_count = 0
         self.is_run = False
         self.tests = None
+        self.results = []         # type: T.List[TestRun]
         self.logfilename = None   # type: T.Optional[str]
         self.logfile = None       # type: T.Optional[T.TextIO]
         self.jsonlogfile = None   # type: T.Optional[T.TextIO]
+        self.junit = None         # type: T.Optional[JunitBuilder]
         if self.options.benchmark:
             self.tests = load_benchmarks(options.wd)
         else:
@@ -678,12 +788,11 @@ class TestHarness:
         self.close_logfiles()

     def close_logfiles(self) -> None:
-        if self.logfile:
-            self.logfile.close()
-            self.logfile = None
-        if self.jsonlogfile:
-            self.jsonlogfile.close()
-            self.jsonlogfile = None
+        for f in ['logfile', 'jsonlogfile']:
+            lfile = getattr(self, f)
+            if lfile:
+                lfile.close()
+                setattr(self, f, None)

     def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> T.Dict[str, str]:
         if ':' in options.setup:
@@ -773,20 +882,24 @@ class TestHarness:
             self.logfile.write(result_str)
         if self.jsonlogfile:
             write_json_log(self.jsonlogfile, name, result)
+        if self.junit:
+            self.junit.log(name, result)

     def print_summary(self) -> None:
-        msg = '''
+        msg = textwrap.dedent('''
 Ok: {:<4}
 Expected Fail: {:<4}
 Fail: {:<4}
 Unexpected Pass: {:<4}
 Skipped: {:<4}
 Timeout: {:<4}
-'''.format(self.success_count, self.expectedfail_count, self.fail_count,
+''').format(self.success_count, self.expectedfail_count, self.fail_count,
            self.unexpectedpass_count, self.skip_count, self.timeout_count)
         print(msg)
         if self.logfile:
             self.logfile.write(msg)
+        if self.junit:
+            self.junit.write()

     def print_collected_logs(self) -> None:
         if len(self.collected_logs) > 0:
@@ -903,6 +1016,9 @@ Timeout: {:<4}
         if namebase:
             logfile_base += '-' + namebase.replace(' ', '_')

+        self.junit = JunitBuilder(logfile_base + '.junit.xml')
+
         self.logfilename = logfile_base + '.txt'
         self.jsonlogfilename = logfile_base + '.json'

run_unittests.py
@@ -4617,6 +4617,31 @@ recommended as it is not supported on some platforms''')
         out = self.build()
         self.assertNotIn('Project configured', out)

+    def _test_junit(self, case: str) -> None:
+        try:
+            import lxml.etree as et
+        except ImportError:
+            raise unittest.SkipTest('lxml required, but not found.')
+
+        schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
+
+        testdir = os.path.join(self.common_test_dir, case)
+        self.init(testdir)
+        self.run_tests()
+
+        junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
+        try:
+            schema.assertValid(junit)
+        except et.DocumentInvalid as e:
+            self.fail(e.error_log)
+
+    def test_junit_valid_tap(self):
+        self._test_junit('213 tap tests')
+
+    def test_junit_valid_exitcode(self):
+        self._test_junit('44 test args')
+

 class FailureTests(BasePlatformTests):
     '''
     Tests that test failure conditions. Build files here should be dynamically
