From 959c1becd63ff8ccbc106755574af9bb49392b08 Mon Sep 17 00:00:00 2001
From: Dylan Baker
Date: Wed, 22 Apr 2020 13:54:27 -0700
Subject: [PATCH 1/5] ci: install python3-lxml

This will be used by the JUnit validation tests.
---
 ci/ciimage/arch/install.sh     | 2 +-
 ci/ciimage/eoan/install.sh     | 1 +
 ci/ciimage/fedora/install.sh   | 2 +-
 ci/ciimage/opensuse/install.sh | 2 +-
 4 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/ci/ciimage/arch/install.sh b/ci/ciimage/arch/install.sh
index 7fe139edc..6cbbb27b0 100755
--- a/ci/ciimage/arch/install.sh
+++ b/ci/ciimage/arch/install.sh
@@ -12,7 +12,7 @@ pkgs=(
   itstool gtk3 java-environment=8 gtk-doc llvm clang sdl2 graphviz doxygen vulkan-validation-layers
   openssh mercurial gtk-sharp-2 qt5-tools libwmf
   valgrind cmake netcdf-fortran openmpi nasm gnustep-base gettext
-  python-jsonschema
+  python-jsonschema python-lxml
   # cuda
 )

diff --git a/ci/ciimage/eoan/install.sh b/ci/ciimage/eoan/install.sh
index 4b3b7461e..7d7a1fde8 100755
--- a/ci/ciimage/eoan/install.sh
+++ b/ci/ciimage/eoan/install.sh
@@ -11,6 +11,7 @@ export DC=gdc

 pkgs=(
   python3-pytest-xdist python3-pip libxml2-dev libxslt1-dev libyaml-dev libjson-glib-dev
+  python3-lxml
   wget unzip
   qt5-default clang
   pkg-config-arm-linux-gnueabihf

diff --git a/ci/ciimage/fedora/install.sh b/ci/ciimage/fedora/install.sh
index 242d6770e..f61d97e01 100755
--- a/ci/ciimage/fedora/install.sh
+++ b/ci/ciimage/fedora/install.sh
@@ -13,7 +13,7 @@ pkgs=(
   doxygen vulkan-devel vulkan-validation-layers-devel openssh mercurial gtk-sharp2-devel libpcap-devel
   gpgme-devel qt5-qtbase-devel qt5-qttools-devel qt5-linguist qt5-qtbase-private-devel
   libwmf-devel valgrind cmake openmpi-devel nasm gnustep-base-devel gettext-devel ncurses-devel
-  libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
+  libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel python3-lxml
 )

diff --git a/ci/ciimage/opensuse/install.sh b/ci/ciimage/opensuse/install.sh
index c5dd6df1f..79be8a117 100755
--- a/ci/ciimage/opensuse/install.sh
+++ b/ci/ciimage/opensuse/install.sh
@@ -5,7 +5,7 @@
 set -e

 source /ci/common.sh

 pkgs=(
-  python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3
+  python3-setuptools python3-wheel python3-pip python3-pytest-xdist python3 python3-lxml
   ninja make git autoconf automake patch python3-Cython python3-jsonschema
   elfutils gcc gcc-c++ gcc-fortran gcc-objc gcc-obj-c++ vala rust bison flex curl mono-core
   gtkmm3-devel gtest gmock protobuf-devel wxGTK3-3_2-devel gobject-introspection-devel
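For context, the JUnit validation tests added later in this series use lxml to check the generated XML against an XSD. A minimal sketch of that kind of check, with illustrative file paths:

```python
# Minimal sketch of XSD validation with lxml, the reason python3-lxml is
# being installed in the CI images. Paths are illustrative.
import lxml.etree as et

schema = et.XMLSchema(et.parse('data/schema.xsd'))     # compile the schema
doc = et.parse('meson-logs/testlog.junit.xml')         # parse the result file
if not schema.validate(doc):
    for error in schema.error_log:                     # report any violations
        print(error)
```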
From 4dcbb9254a586f59616ddf02bfd508ab76c47f2b Mon Sep 17 00:00:00 2001
From: Dylan Baker
Date: Tue, 21 Apr 2020 14:13:25 -0700
Subject: [PATCH 2/5] mtest: Use textwrap.dedent for large block

This allows editors like vim to properly fold the code, and makes it
generally easier to read.
---
 mesonbuild/mtest.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 23643c535..8ea8d5fe0 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -33,6 +33,7 @@ import signal
 import subprocess
 import sys
 import tempfile
+import textwrap
 import time
 import typing as T

@@ -775,14 +776,14 @@ class TestHarness:
             write_json_log(self.jsonlogfile, name, result)

     def print_summary(self) -> None:
-        msg = '''
-Ok:                 {:<4}
-Expected Fail:      {:<4}
-Fail:               {:<4}
-Unexpected Pass:    {:<4}
-Skipped:            {:<4}
-Timeout:            {:<4}
-'''.format(self.success_count, self.expectedfail_count, self.fail_count,
+        msg = textwrap.dedent('''
+            Ok:                 {:<4}
+            Expected Fail:      {:<4}
+            Fail:               {:<4}
+            Unexpected Pass:    {:<4}
+            Skipped:            {:<4}
+            Timeout:            {:<4}
+            ''').format(self.success_count, self.expectedfail_count, self.fail_count,
                    self.unexpectedpass_count, self.skip_count, self.timeout_count)
         print(msg)
         if self.logfile:
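A standalone illustration of the textwrap.dedent pattern this patch adopts; the triple-quoted literal can now be indented to match the surrounding code, and dedent strips the common leading whitespace before the string is used:

```python
import textwrap

def print_counts(ok: int, fail: int) -> None:
    # The literal folds with the function body in editors; dedent()
    # removes the common leading whitespace from every line.
    msg = textwrap.dedent('''
        Ok:   {:<4}
        Fail: {:<4}
        ''').format(ok, fail)
    print(msg)

print_counts(10, 2)
```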
From 0c3bb15357419d3cc7a453da25b349a9c34e391d Mon Sep 17 00:00:00 2001
From: Dylan Baker
Date: Tue, 21 Apr 2020 14:29:54 -0700
Subject: [PATCH 3/5] mtest: Store individual results for TAP tests

This will be used by the JUnit writer.
---
 mesonbuild/mtest.py | 44 +++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index 8ea8d5fe0..a3553ddb3 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -336,30 +336,29 @@ class TestRun:
             res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS
         else:
             res = TestResult.FAIL if bool(returncode) else TestResult.OK
-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, [], returncode, starttime, duration, stdo, stde, cmd)

     @classmethod
     def make_tap(cls, test: 'TestSerialisation', test_env: T.Dict[str, str],
                  returncode: int, starttime: float, duration: float,
                  stdo: str, stde: str,
                  cmd: T.Optional[T.List[str]]) -> 'TestRun':
-        res = None
-        num_tests = 0
+        res = None    # T.Optional[TestResult]
+        results = []  # T.List[TestResult]
         failed = False
-        num_skipped = 0

         for i in TAPParser(io.StringIO(stdo)).parse():
             if isinstance(i, TAPParser.Bailout):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
+                failed = True
             elif isinstance(i, TAPParser.Test):
-                if i.result == TestResult.SKIP:
-                    num_skipped += 1
-                elif i.result in (TestResult.FAIL, TestResult.UNEXPECTEDPASS):
+                results.append(i.result)
+                if i.result not in {TestResult.OK, TestResult.EXPECTEDFAIL}:
                     failed = True
-                num_tests += 1
             elif isinstance(i, TAPParser.Error):
-                res = TestResult.ERROR
+                results.append(TestResult.ERROR)
                 stde += '\nTAP parsing error: ' + i.message
+                failed = True

         if returncode != 0:
             res = TestResult.ERROR
@@ -367,7 +366,7 @@

         if res is None:
             # Now determine the overall result of the test based on the outcome of the subcases
-            if num_skipped == num_tests:
+            if all(t is TestResult.SKIP for t in results):
                 # This includes the case where num_tests is zero
                 res = TestResult.SKIP
             elif test.should_fail:
@@ -375,14 +374,16 @@
             else:
                 res = TestResult.FAIL if failed else TestResult.OK

-        return cls(test, test_env, res, returncode, starttime, duration, stdo, stde, cmd)
+        return cls(test, test_env, res, results, returncode, starttime, duration, stdo, stde, cmd)

     def __init__(self, test: 'TestSerialisation', test_env: T.Dict[str, str],
-                 res: TestResult, returncode: int, starttime: float, duration: float,
+                 res: TestResult, results: T.List[TestResult], returncode: int,
+                 starttime: float, duration: float,
                  stdo: T.Optional[str], stde: T.Optional[str],
                  cmd: T.Optional[T.List[str]]):
         assert isinstance(res, TestResult)
         self.res = res
+        self.results = results  # May be an empty list
         self.returncode = returncode
         self.starttime = starttime
         self.duration = duration
@@ -391,6 +392,7 @@
         self.cmd = cmd
         self.env = test_env
         self.should_fail = test.should_fail
+        self.project = test.project_name

     def get_log(self) -> str:
         res = '--- command ---\n'
@@ -491,7 +493,7 @@
         cmd = self._get_cmd()
         if cmd is None:
             skip_stdout = 'Not run because can not execute cross compiled binaries.'
-            return TestRun(self.test, self.test_env, TestResult.SKIP, GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
+            return TestRun(self.test, self.test_env, TestResult.SKIP, [], GNU_SKIP_RETURNCODE, time.time(), 0.0, skip_stdout, None, None)
         else:
             wrap = TestHarness.get_wrapper(self.options)
             if self.options.gdb:
@@ -634,7 +636,7 @@
             stdo = ""
             stde = additional_error
         if timed_out:
-            return TestRun(self.test, self.test_env, TestResult.TIMEOUT, p.returncode, starttime, duration, stdo, stde, cmd)
+            return TestRun(self.test, self.test_env, TestResult.TIMEOUT, [], p.returncode, starttime, duration, stdo, stde, cmd)
         else:
             if self.test.protocol == 'exitcode':
                 return TestRun.make_exitcode(self.test, self.test_env, p.returncode, starttime, duration, stdo, stde, cmd)
@@ -656,6 +658,7 @@ class TestHarness:
         self.timeout_count = 0
         self.is_run = False
         self.tests = None
+        self.results = []  # type: T.List[TestRun]
         self.logfilename = None  # type: T.Optional[str]
         self.logfile = None  # type: T.Optional[T.TextIO]
         self.jsonlogfile = None  # type: T.Optional[T.TextIO]
@@ -679,12 +682,11 @@
         self.close_logfiles()

     def close_logfiles(self) -> None:
-        if self.logfile:
-            self.logfile.close()
-            self.logfile = None
-        if self.jsonlogfile:
-            self.jsonlogfile.close()
-            self.jsonlogfile = None
+        for f in ['logfile', 'jsonlogfile']:
+            lfile = getattr(self, f)
+            if lfile:
+                lfile.close()
+                setattr(self, f, None)

     def merge_suite_options(self, options: argparse.Namespace, test: 'TestSerialisation') -> T.Dict[str, str]:
         if ':' in options.setup:
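The roll-up rule make_tap now applies to the collected per-subtest results can be seen in isolation in the sketch below (a simplified enum; meson's real TestResult has more members):

```python
# Simplified sketch of make_tap's roll-up of per-subtest results.
import enum
import typing as T

class TestResult(enum.Enum):
    OK = 'OK'
    SKIP = 'SKIP'
    FAIL = 'FAIL'
    EXPECTEDFAIL = 'EXPECTEDFAIL'
    UNEXPECTEDPASS = 'UNEXPECTEDPASS'

def overall(results: T.List[TestResult], should_fail: bool = False) -> TestResult:
    # An empty result list counts as all-skipped, as in the patch.
    if all(r is TestResult.SKIP for r in results):
        return TestResult.SKIP
    failed = any(r not in {TestResult.OK, TestResult.EXPECTEDFAIL}
                 for r in results)
    if should_fail:
        return TestResult.EXPECTEDFAIL if failed else TestResult.UNEXPECTEDPASS
    return TestResult.FAIL if failed else TestResult.OK

print(overall([TestResult.OK, TestResult.FAIL]))    # TestResult.FAIL
print(overall([TestResult.SKIP, TestResult.SKIP]))  # TestResult.SKIP
```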
From 7b7f93a09f90c93d94e4a78ddc1dd766f05bf4a9 Mon Sep 17 00:00:00 2001
From: Dylan Baker
Date: Wed, 22 Apr 2020 12:25:49 -0700
Subject: [PATCH 4/5] mtest: Generate a JUnit XML result file

JUnit is pretty ubiquitous; lots of services and result viewers
understand it. In particular, GitLab and Jenkins know how to consume
JUnit XML. This means projects using CI services can have their test
results consumed automatically.

Fixes: #6972
---
 data/schema.xsd                           |  96 +++++++++++++++
 .../snippets/junit_result_generation.md   |   4 +
 mesonbuild/mtest.py                       | 113 ++++++++++++++++++
 run_unittests.py                          |  25 ++++
 4 files changed, 238 insertions(+)
 create mode 100644 data/schema.xsd
 create mode 100644 docs/markdown/snippets/junit_result_generation.md

diff --git a/data/schema.xsd b/data/schema.xsd
new file mode 100644
index 000000000..58c6bfde8
--- /dev/null
+++ b/data/schema.xsd
@@ -0,0 +1,96 @@
+[96-line JUnit XSD: the xs:schema element definitions describing the
+testsuites/testsuite/testcase structure were stripped in extraction and
+are not recoverable here]

diff --git a/docs/markdown/snippets/junit_result_generation.md b/docs/markdown/snippets/junit_result_generation.md
new file mode 100644
index 000000000..fbe910bb8
--- /dev/null
+++ b/docs/markdown/snippets/junit_result_generation.md
@@ -0,0 +1,4 @@
+## Meson test now produces JUnit XML from results
+
+Meson will now generate a JUnit compatible XML file from test results. It
+will be in the meson-logs directory and is called testlog.junit.xml.
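Before the implementation below, here is a rough sketch of the shape of document the writer produces, built with the same ElementTree calls the patch uses (suite and test names and counts are illustrative):

```python
# Illustrative shape of testlog.junit.xml; real names and counts come
# from the recorded test runs.
import xml.etree.ElementTree as et

root = et.Element('testsuites', tests='2', errors='0', failures='1')
suite = et.SubElement(root, 'testsuite', name='myproject',
                      tests='2', errors='0', failures='1', skipped='0')
et.SubElement(suite, 'testcase', name='parser', classname='parser')
failing = et.SubElement(suite, 'testcase', name='lexer', classname='lexer')
et.SubElement(failing, 'failure')
et.ElementTree(root).write('testlog.junit.xml',
                           encoding='utf-8', xml_declaration=True)
```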
diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py
index a3553ddb3..323973695 100644
--- a/mesonbuild/mtest.py
+++ b/mesonbuild/mtest.py
@@ -36,6 +36,7 @@ import tempfile
 import textwrap
 import time
 import typing as T
+import xml.etree.ElementTree as et

 from . import build
 from . import environment
@@ -321,6 +322,110 @@ class TAPParser:
             yield self.Error('Too many tests run (expected {}, got {})'.format(plan.count, num_tests))


+class JunitBuilder:
+
+    """Builder for JUnit test results.
+
+    JUnit is impossible to stream out: it requires attributes counting the
+    total number of tests, failures, skips, and errors in the root element
+    and in each test suite. As such, we use a builder class to track each
+    test case, and calculate all metadata before writing it out.
+
+    For tests with multiple results (like from a TAP test), we record the
+    test as a suite named project_name.test_name. This allows us to track
+    each result separately. For tests with only one result (such as exit-code
+    tests) we record each one into a suite with the name project_name. The use
+    of the project_name allows us to sort subproject tests separately from
+    the root project.
+    """
+
+    def __init__(self, filename: str) -> None:
+        self.filename = filename
+        self.root = et.Element(
+            'testsuites', tests='0', errors='0', failures='0')
+        self.suites = {}  # type: T.Dict[str, et.Element]
+
+    def log(self, name: str, test: 'TestRun') -> None:
+        """Log a single test case."""
+        # In this case we have a test binary with multiple results.
+        # We want to record this so that each result is recorded
+        # separately
+        if test.results:
+            suitename = '{}.{}'.format(test.project, name)
+            assert suitename not in self.suites, 'duplicate suite'
+
+            suite = self.suites[suitename] = et.Element(
+                'testsuite',
+                name=suitename,
+                tests=str(len(test.results)),
+                errors=str(sum(1 for r in test.results if r is TestResult.ERROR)),
+                failures=str(sum(1 for r in test.results if r in
+                                 {TestResult.FAIL, TestResult.UNEXPECTEDPASS, TestResult.TIMEOUT})),
+                skipped=str(sum(1 for r in test.results if r is TestResult.SKIP)),
+            )
+
+            for i, result in enumerate(test.results):
+                # Both name and classname are required. Set them both to the
+                # number of the test in a TAP test, as TAP doesn't give names.
+                testcase = et.SubElement(suite, 'testcase', name=str(i), classname=str(i))
+                if result is TestResult.SKIP:
+                    et.SubElement(testcase, 'skipped')
+                elif result is TestResult.ERROR:
+                    et.SubElement(testcase, 'error')
+                elif result is TestResult.FAIL:
+                    et.SubElement(testcase, 'failure')
+                elif result is TestResult.UNEXPECTEDPASS:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test unexpectedly passed.'
+                elif result is TestResult.TIMEOUT:
+                    fail = et.SubElement(testcase, 'failure')
+                    fail.text = 'Test did not finish before configured timeout.'
+            if test.stdo:
+                out = et.SubElement(suite, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(suite, 'system-err')
+                err.text = test.stde.rstrip()
+        else:
+            if test.project not in self.suites:
+                suite = self.suites[test.project] = et.Element(
+                    'testsuite', name=test.project, tests='1', errors='0',
+                    failures='0', skipped='0')
+            else:
+                suite = self.suites[test.project]
+                suite.attrib['tests'] = str(int(suite.attrib['tests']) + 1)
+
+            testcase = et.SubElement(suite, 'testcase', name=name, classname=name)
+            if test.res is TestResult.SKIP:
+                et.SubElement(testcase, 'skipped')
+                suite.attrib['skipped'] = str(int(suite.attrib['skipped']) + 1)
+            elif test.res is TestResult.ERROR:
+                et.SubElement(testcase, 'error')
+                suite.attrib['errors'] = str(int(suite.attrib['errors']) + 1)
+            elif test.res is TestResult.FAIL:
+                et.SubElement(testcase, 'failure')
+                suite.attrib['failures'] = str(int(suite.attrib['failures']) + 1)
+            if test.stdo:
+                out = et.SubElement(testcase, 'system-out')
+                out.text = test.stdo.rstrip()
+            if test.stde:
+                err = et.SubElement(testcase, 'system-err')
+                err.text = test.stde.rstrip()
+
+    def write(self) -> None:
+        """Calculate total test counts and write out the xml result."""
+        for suite in self.suites.values():
+            self.root.append(suite)
+            # Skipped is really not allowed in the "testsuites" element
+            for attr in ['tests', 'errors', 'failures']:
+                self.root.attrib[attr] = str(int(self.root.attrib[attr]) + int(suite.attrib[attr]))
+
+        tree = et.ElementTree(self.root)
+        with open(self.filename, 'wb') as f:
+            tree.write(f, encoding='utf-8', xml_declaration=True)
+
+
 class TestRun:

     @classmethod
@@ -662,6 +767,7 @@ class TestHarness:
         self.logfilename = None  # type: T.Optional[str]
         self.logfile = None  # type: T.Optional[T.TextIO]
         self.jsonlogfile = None  # type: T.Optional[T.TextIO]
+        self.junit = None  # type: T.Optional[JunitBuilder]
         if self.options.benchmark:
             self.tests = load_benchmarks(options.wd)
         else:
@@ -776,6 +882,8 @@
             self.logfile.write(result_str)
         if self.jsonlogfile:
             write_json_log(self.jsonlogfile, name, result)
+        if self.junit:
+            self.junit.log(name, result)

     def print_summary(self) -> None:
         msg = textwrap.dedent('''
@@ -790,6 +898,8 @@
         print(msg)
         if self.logfile:
             self.logfile.write(msg)
+        if self.junit:
+            self.junit.write()

     def print_collected_logs(self) -> None:
         if len(self.collected_logs) > 0:
@@ -906,6 +1016,9 @@

         if namebase:
             logfile_base += '-' + namebase.replace(' ', '_')
+
+        self.junit = JunitBuilder(logfile_base + '.junit.xml')
+
         self.logfilename = logfile_base + '.txt'
         self.jsonlogfilename = logfile_base + '.json'

diff --git a/run_unittests.py b/run_unittests.py
index 831e53fdc..da898a31b 100755
--- a/run_unittests.py
+++ b/run_unittests.py
@@ -4617,6 +4617,31 @@ recommended as it is not supported on some platforms''')
         out = self.build()
         self.assertNotIn('Project configured', out)

+    def _test_junit(self, case: str) -> None:
+        try:
+            import lxml.etree as et
+        except ImportError:
+            raise unittest.SkipTest('lxml required, but not found.')
+
+        schema = et.XMLSchema(et.parse(str(Path(__file__).parent / 'data' / 'schema.xsd')))
+
+        testdir = os.path.join(self.common_test_dir, case)
+        self.init(testdir)
+        self.run_tests()
+
+        junit = et.parse(str(Path(self.builddir) / 'meson-logs' / 'testlog.junit.xml'))
+        try:
+            schema.assertValid(junit)
+        except et.DocumentInvalid as e:
+            self.fail(e.error_log)
+
+    def test_junit_valid_tap(self):
+        self._test_junit('213 tap tests')
+
+    def test_junit_valid_exitcode(self):
+        self._test_junit('44 test args')
+

 class FailureTests(BasePlatformTests):
     '''
     Tests that test failure conditions. Build files here should be dynamically
From 97f7e3d83cdafbd94d4a164582ea71035c996baa Mon Sep 17 00:00:00 2001
From: Dylan Baker
Date: Thu, 23 Apr 2020 10:36:13 -0700
Subject: [PATCH 5/5] ci: Try to fix opensuse image

---
 ci/ciimage/opensuse/install.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ci/ciimage/opensuse/install.sh b/ci/ciimage/opensuse/install.sh
index 79be8a117..7c90ec33f 100755
--- a/ci/ciimage/opensuse/install.sh
+++ b/ci/ciimage/opensuse/install.sh
@@ -17,7 +17,7 @@ pkgs=(
   libxml2-devel libxslt-devel libyaml-devel glib2-devel json-glib-devel
   boost-devel libboost_date_time-devel libboost_filesystem-devel libboost_locale-devel
   libboost_system-devel libboost_test-devel libboost_log-devel libboost_regex-devel
-  libboost_python-devel libboost_python-py3-1_71_0-devel libboost_regex-devel
+  libboost_python-py3-1_71_0-devel libboost_regex-devel
 )

 # Sys update
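With the series applied, a CI job can inspect the generated file directly; a small sketch (the path assumes the default meson-logs layout inside the build directory):

```python
# Print per-suite failure counts from a generated result file.
import xml.etree.ElementTree as et

root = et.parse('meson-logs/testlog.junit.xml').getroot()
for suite in root.iter('testsuite'):
    print(suite.get('name'), 'failures:', suite.get('failures'))
```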