Merge pull request #6621 from jon-turney/project_tests_cleanup

Various cleanups and documentation improvements for run_project_tests.py
Jussi Pakkanen committed via GitHub
commit d8c3dc66d5
Changed files:

  1. docs/markdown/Contributing.md (37 lines changed)
  2. run_project_tests.py (102 lines changed)
  3. test cases/common/56 install script/no-installed-files (0 lines changed; empty marker file removed)

@@ -127,6 +127,8 @@ project tests. To run all tests, execute `./run_tests.py`. Unit tests
 can be run with `./run_unittests.py` and project tests with
 `./run_project_tests.py`.
 
+### Project tests
+
 Subsets of project tests can be selected with
 `./run_project_tests.py --only` option. This can save a great deal of
 time when only a certain part of Meson is being tested.
@@ -139,7 +141,7 @@ For example, all the CUDA project tests run and pass on Windows via
 `./run_project_tests.py --only cuda --backend ninja`
 
 Each project test is a standalone project that can be compiled on its
-own. They are all in `test cases` subdirectory. The simplest way to
+own. They are all in the `test cases` subdirectory. The simplest way to
 run a single project test is to do something like `./meson.py test\
 cases/common/1\ trivial builddir`. The one exception to this is `test
 cases/unit` directory discussed below.
@@ -153,13 +155,32 @@ should be implemented as a Python script. The goal of test projects is
 also to provide sample projects that end users can use as a base for
 their own projects.
 
-All project tests follow the same pattern: they are compiled, tests
-are run and finally install is run. Passing means that building and
-tests succeed and installed files match the `installed_files.txt` file
-in the test's source root. Any tests that require more thorough
-analysis, such as checking that certain compiler arguments can be
-found in the command line or that the generated pkg-config files
-actually work should be done with a unit test.
+All project tests follow the same pattern: they are configured, compiled, tests
+are run and finally install is run. Passing means that configuring, building and
+tests succeed and that installed files match those expected.
+
+Any tests that require more thorough analysis, such as checking that certain
+compiler arguments can be found in the command line or that the generated
+pkg-config files actually work should be done with a unit test.
+
+The following files in the test's source root are consulted, if they exist:
+
+* `installed_files.txt` lists the files which are expected to be installed.
+Various constructs containing `?` are used to indicate platform specific
+filename variations (e.g. `?so` represents the platform appropriate suffix for a
+shared library)
+
+* `setup_env.json` contains a dictionary which specifies additional
+environment variables to be set during the configure step of the test. `@ROOT@`
+is replaced with the absolute path of the source directory.
+
+* `crossfile.ini` and `nativefile.ini` are passed to the configure step with
+`--cross-file` and `--native-file` options, respectively.
+
+Additionally:
+
+* `mlog.cmd_ci_include()` can be called from anywhere inside meson to capture the
+contents of an additional file into the CI log on failure.
 
 Projects needed by unit tests are in the `test cases/unit`
 subdirectory. They are not run as part of `./run_project_tests.py`.
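
To make the conventions documented in the hunk above concrete, here is a minimal sketch of the two data files. The paths and the variable name are illustrative, not taken from any real test case; only the `?exe`/`?so` constructs and the `@ROOT@` substitution come from the documentation itself.

```
usr/bin/prog?exe
usr/lib/libstuff?so
```

A hypothetical `setup_env.json`, where `@ROOT@` would be replaced with the absolute path of the test's source directory before configure runs:

```json
{
    "TEST_ENV_VAR": "@ROOT@/subdir"
}
```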

@@ -65,17 +65,30 @@ class BuildStep(Enum):
 
 
 class TestResult:
-    def __init__(self, msg, step, stdo, stde, mlog, cicmds, conftime=0, buildtime=0, testtime=0):
-        self.msg = msg
-        self.step = step
-        self.stdo = stdo
-        self.stde = stde
-        self.mlog = mlog
+    def __init__(self, cicmds):
+        self.msg = ''  # empty msg indicates test success
+        self.stdo = ''
+        self.stde = ''
+        self.mlog = ''
         self.cicmds = cicmds
-        self.conftime = conftime
-        self.buildtime = buildtime
-        self.testtime = testtime
+        self.conftime = 0
+        self.buildtime = 0
+        self.testtime = 0
+
+    def add_step(self, step, stdo, stde, mlog='', time=0):
+        self.step = step
+        self.stdo += stdo
+        self.stde += stde
+        self.mlog += mlog
+        if step == BuildStep.configure:
+            self.conftime = time
+        elif step == BuildStep.build:
+            self.buildtime = time
+        elif step == BuildStep.test:
+            self.testtime = time
+
+    def fail(self, msg):
+        self.msg = msg
 
 
 @functools.total_ordering
 class TestDef:
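
A usage sketch of the refactored class, assuming the `TestResult` and `BuildStep` definitions above are in scope; every value here is made up for illustration:

```python
# Illustrative only: one TestResult accumulates output step by step.
result = TestResult(cicmds=[])
result.add_step(BuildStep.configure, 'configure stdout', '', 'meson log text', 1.2)
result.add_step(BuildStep.build, 'build stdout', '', '', 3.4)

# An empty msg still means success; per-step timings were recorded.
assert result.msg == ''
assert result.conftime == 1.2 and result.buildtime == 3.4

# On failure, the runner records a message and returns early.
result.fail('Compiling source code failed.')
assert result.msg != ''
```

Compared to the old constructor, failure state is now mutated in place via `fail()`, so each early return in `_run_test` below no longer has to re-thread stdout, stderr, the meson log and all three timings through a nine-argument call.
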
@@ -230,14 +243,10 @@ def validate_install(srcdir: str, installdir: Path, compiler, env) -> str:
     # List of installed files
     info_file = Path(srcdir) / 'installed_files.txt'
     installdir = Path(installdir)
-    # If this exists, the test does not install any other files
-    noinst_file = Path('usr/no-installed-files')
     expected = {}  # type: T.Dict[Path, bool]
     ret_msg = ''
     # Generate list of expected files
-    if (installdir / noinst_file).is_file():
-        expected[noinst_file] = False
-    elif info_file.is_file():
+    if info_file.is_file():
         with info_file.open() as f:
             for line in f:
                 line = platform_fix_name(line.strip(), compiler, env)
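
With the `usr/no-installed-files` marker gone, an absent `installed_files.txt` simply means there is no expected-file list. Below is a standalone sketch of just the collection step, simplified under two assumptions: it skips `platform_fix_name` (which the real script uses to resolve the `?` constructs), and it treats the bool value as a "seen" flag, which is my reading of the `T.Dict[Path, bool]` annotation.

```python
from pathlib import Path
import typing as T

def read_expected_files(srcdir: str) -> T.Dict[Path, bool]:
    # Map each expected install path to a flag that a later pass can
    # flip when the file is actually found under the install directory.
    expected = {}  # type: T.Dict[Path, bool]
    info_file = Path(srcdir) / 'installed_files.txt'
    if info_file.is_file():
        with info_file.open() as f:
            for line in f:
                line = line.strip()
                if line:
                    expected[Path(line)] = False
    return expected
```
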
@@ -434,16 +443,20 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     except Exception:
         mesonlog = no_meson_log_msg
     cicmds = run_ci_commands(mesonlog)
-    gen_time = time.time() - gen_start
+    testresult = TestResult(cicmds)
+    testresult.add_step(BuildStep.configure, stdo, stde, mesonlog, time.time() - gen_start)
     if should_fail == 'meson':
         if returncode == 1:
-            return TestResult('', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
         elif returncode != 0:
-            return TestResult('Test exited with unexpected status {}'.format(returncode), BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            testresult.fail('Test exited with unexpected status {}.'.format(returncode))
+            return testresult
         else:
-            return TestResult('Test that should have failed succeeded', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+            testresult.fail('Test that should have failed succeeded.')
+            return testresult
     if returncode != 0:
-        return TestResult('Generating the build system failed.', BuildStep.configure, stdo, stde, mesonlog, cicmds, gen_time)
+        testresult.fail('Generating the build system failed.')
+        return testresult
     builddata = build.load(test_build_dir)
     # Touch the meson.build file to force a regenerate so we can test that
     # regeneration works before a build is run.
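
The `should_fail == 'meson'` branch above encodes three outcomes. A hypothetical distillation (this helper does not exist in the script; it just restates the branch as a pure function returning the failure message, empty on success):

```python
def check_expected_configure_failure(returncode: int) -> str:
    # Tests marked should_fail == 'meson' are expected to fail at configure.
    if returncode == 1:
        return ''  # configure failed exactly as expected: pass
    elif returncode != 0:
        return 'Test exited with unexpected status {}.'.format(returncode)
    else:
        return 'Test that should have failed succeeded.'
```
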
@@ -453,15 +466,15 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     dir_args = get_backend_args_for_dir(backend, test_build_dir)
     build_start = time.time()
     pc, o, e = Popen_safe(compile_commands + dir_args, cwd=test_build_dir)
-    build_time = time.time() - build_start
-    stdo += o
-    stde += e
+    testresult.add_step(BuildStep.build, o, e, '', time.time() - build_start)
     if should_fail == 'build':
         if pc.returncode != 0:
-            return TestResult('', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)
-        return TestResult('Test that should have failed to build succeeded', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
+        testresult.fail('Test that should have failed to build succeeded.')
+        return testresult
     if pc.returncode != 0:
-        return TestResult('Compiling source code failed.', BuildStep.build, stdo, stde, mesonlog, cicmds, gen_time, build_time)
+        testresult.fail('Compiling source code failed.')
+        return testresult
     # Touch the meson.build file to force a regenerate so we can test that
     # regeneration works after a build is complete.
     ensure_backend_detects_changes(backend)
@@ -469,37 +482,44 @@ def _run_test(testdir, test_build_dir, install_dir, extra_args, compiler, backen
     test_start = time.time()
     # Test in-process
     (returncode, tstdo, tstde, test_log) = run_test_inprocess(test_build_dir)
-    test_time = time.time() - test_start
-    stdo += tstdo
-    stde += tstde
-    mesonlog += test_log
+    testresult.add_step(BuildStep.test, tstdo, tstde, test_log, time.time() - test_start)
     if should_fail == 'test':
         if returncode != 0:
-            return TestResult('', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)
-        return TestResult('Test that should have failed to run unit tests succeeded', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time)
+            return testresult
+        testresult.fail('Test that should have failed to run unit tests succeeded.')
+        return testresult
     if returncode != 0:
-        return TestResult('Running unit tests failed.', BuildStep.test, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        testresult.fail('Running unit tests failed.')
+        return testresult
     # Do installation, if the backend supports it
     if install_commands:
         env = os.environ.copy()
         env['DESTDIR'] = install_dir
         # Install with subprocess
         pi, o, e = Popen_safe(install_commands, cwd=test_build_dir, env=env)
-        stdo += o
-        stde += e
+        testresult.add_step(BuildStep.install, o, e)
         if pi.returncode != 0:
-            return TestResult('Running install failed.', BuildStep.install, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+            testresult.fail('Running install failed.')
+            return testresult
     # Clean with subprocess
     env = os.environ.copy()
    pi, o, e = Popen_safe(clean_commands + dir_args, cwd=test_build_dir, env=env)
-    stdo += o
-    stde += e
+    testresult.add_step(BuildStep.clean, o, e)
     if pi.returncode != 0:
-        return TestResult('Running clean failed.', BuildStep.clean, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        testresult.fail('Running clean failed.')
+        return testresult
     # Validate installed files
+    testresult.add_step(BuildStep.install, '', '')
     if not install_commands:
-        return TestResult('', BuildStep.install, '', '', mesonlog, cicmds, gen_time, build_time, test_time)
-    return TestResult(validate_install(testdir, install_dir, compiler, builddata.environment),
-                      BuildStep.validate, stdo, stde, mesonlog, cicmds, gen_time, build_time, test_time)
+        return testresult
+    install_msg = validate_install(testdir, install_dir, compiler, builddata.environment)
+    if install_msg:
+        testresult.fail(install_msg)
+        return testresult
+
+    return testresult
 
 
 def gather_tests(testdir: Path) -> T.Iterator[TestDef]:
     tests = [t.name for t in testdir.glob('*') if t.is_dir()]
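
Downstream, the empty-`msg` convention noted in the constructor is what callers key on, and `step` records the last phase fed to `add_step()`, i.e. where a failure message applies. A hypothetical reporter, not part of the script, just to show how the result would be consumed:

```python
def report(result: TestResult) -> None:
    # Hypothetical: the real runner formats results differently.
    if not result.msg:  # empty msg indicates test success
        print('PASS (conf {:.2f}s, build {:.2f}s, test {:.2f}s)'.format(
            result.conftime, result.buildtime, result.testtime))
    else:
        print('FAIL during {}: {}'.format(result.step, result.msg))
        print(result.stdo)
        print(result.stde)
```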
