diff --git a/docs/markdown/Unit-tests.md b/docs/markdown/Unit-tests.md index a8e72737c..9148bd5a0 100644 --- a/docs/markdown/Unit-tests.md +++ b/docs/markdown/Unit-tests.md @@ -51,10 +51,12 @@ By default Meson uses as many concurrent processes as there are cores on the tes $ MESON_TESTTHREADS=5 ninja test ``` -## Skipped tests +## Skipped tests and hard errors Sometimes a test can only determine at runtime that it can not be run. The GNU standard approach in this case is to exit the program with error code 77. Meson will detect this and report these tests as skipped rather than failed. This behavior was added in version 0.37.0. +In addition, sometimes a test fails to set up so that it should fail even if it is marked as an expected failure. The GNU standard approach in this case is to exit the program with error code 99. Again, Meson will detect this and report these tests as `ERROR`, ignoring the setting of `should_fail`. This behavior was added in version 0.50.0. + ## Testing tool The goal of the meson test tool is to provide a simple way to run tests in a variety of different ways. The tool is designed to be run in the build directory. diff --git a/mesonbuild/mtest.py b/mesonbuild/mtest.py index 57b4a1211..21e54032b 100644 --- a/mesonbuild/mtest.py +++ b/mesonbuild/mtest.py @@ -36,6 +36,10 @@ import enum # mean that the test should be skipped. GNU_SKIP_RETURNCODE = 77 +# GNU autotools interprets a return code of 99 from tests it executes to +# mean that the test failed even before testing what it is supposed to test. 
+GNU_ERROR_RETURNCODE = 99 + def is_windows(): platname = platform.system().lower() return platname == 'windows' or 'mingw' in platname @@ -146,6 +150,7 @@ class TestResult(enum.Enum): FAIL = 'FAIL' EXPECTEDFAIL = 'EXPECTEDFAIL' UNEXPECTEDPASS = 'UNEXPECTEDPASS' + ERROR = 'ERROR' class TestRun: @@ -153,11 +158,13 @@ class TestRun: def make_exitcode(test, returncode, duration, stdo, stde, cmd): if returncode == GNU_SKIP_RETURNCODE: res = TestResult.SKIP + elif returncode == GNU_ERROR_RETURNCODE: + res = TestResult.ERROR elif test.should_fail: res = TestResult.EXPECTEDFAIL if bool(returncode) else TestResult.UNEXPECTEDPASS else: res = TestResult.FAIL if bool(returncode) else TestResult.OK - return TestRun(test, res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env) + return TestRun(test, res, returncode, duration, stdo, stde, cmd) def __init__(self, test, res, returncode, duration, stdo, stde, cmd): assert isinstance(res, TestResult) @@ -474,7 +481,7 @@ class TestHarness: self.skip_count += 1 elif result.res is TestResult.OK: self.success_count += 1 - elif result.res is TestResult.FAIL: + elif result.res is TestResult.FAIL or result.res is TestResult.ERROR: self.fail_count += 1 elif result.res is TestResult.EXPECTEDFAIL: self.expectedfail_count += 1 @@ -496,7 +503,8 @@ class TestHarness: (num, name, padding1, result.res.value, padding2, result.duration, status) ok_statuses = (TestResult.OK, TestResult.EXPECTEDFAIL) - bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS) + bad_statuses = (TestResult.FAIL, TestResult.TIMEOUT, TestResult.UNEXPECTEDPASS, + TestResult.ERROR) if not self.options.quiet or result.res not in ok_statuses: if result.res not in ok_statuses and mlog.colorize_console: if result.res in bad_statuses: diff --git a/test cases/failing test/4 hard error/main.c b/test cases/failing test/4 hard error/main.c new file mode 100644 index 000000000..a1e705ade --- /dev/null +++ b/test cases/failing test/4 hard 
error/main.c @@ -0,0 +1,3 @@ +int main(void) { + return 99; +} diff --git a/test cases/failing test/4 hard error/meson.build b/test cases/failing test/4 hard error/meson.build new file mode 100644 index 000000000..6979b0416 --- /dev/null +++ b/test cases/failing test/4 hard error/meson.build @@ -0,0 +1,4 @@ +project('trivial', 'c') + +# Exit code 99 even overrides should_fail +test('My Test', executable('main', 'main.c'), should_fail: true)