introspect: add test dependencies info to test/benchmark JSON

Add to the introspection data for tests and benchmarks the ids of any
targets that, as computed by the backend, need to be rebuilt before the
tests can run. This also includes anything that appears on the test's
command line.

Without this information, IDEs must update the entire build before running
any test.  They can now instead selectively build the test executable
itself and anything that is needed to run it.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
pull/7698/head
Paolo Bonzini 4 years ago committed by Daniel Mensinger
parent 0b0873c743
commit fa5c2363eb
  1. 6
      docs/markdown/IDE-integration.md
  2. 5
      docs/markdown/snippets/introspect_test_deps.md
  3. 12
      mesonbuild/backend/backends.py
  4. 1
      mesonbuild/mintro.py
  5. 18
      run_unittests.py
  6. 5
      test cases/unit/57 introspection/cp.py
  7. 7
      test cases/unit/57 introspection/meson.build

@ -247,6 +247,7 @@ line arguments, environment variable settings and how to process the output.
"is_parallel": true / false,
"protocol": "exitcode" / "tap",
"cmd": ["command", "to", "run"],
"depends": ["target1-id", "target2-id"],
"env": {
"VARIABLE1": "value 1",
"VARIABLE2": "value 2"
@ -254,6 +255,11 @@ line arguments, environment variable settings and how to process the output.
}
```
The `depends` entry *(since 0.56.0)* contains target ids; they can be
looked up in the targets introspection data. The executable
pointed to by `cmd` is also included in the entry, as are any
arguments to the test that are build products.
## Build system files
It is also possible to get Meson build files used in your current project. This

@ -0,0 +1,5 @@
## Dependencies listed in test and benchmark introspection
The introspection data for tests and benchmarks now includes the target
ids for executables and built files that are needed by the test. IDEs can
use this feature to update the build more quickly before running a test.

@ -120,7 +120,8 @@ class TestSerialisation:
env: build.EnvironmentVariables, should_fail: bool,
timeout: T.Optional[int], workdir: T.Optional[str],
extra_paths: T.List[str], protocol: TestProtocol, priority: int,
cmd_is_built: bool):
cmd_is_built: bool,
depends: T.List[str]):
self.name = name
self.project_name = project
self.suite = suite
@ -140,6 +141,7 @@ class TestSerialisation:
self.priority = priority
self.needs_exe_wrapper = needs_exe_wrapper
self.cmd_is_built = cmd_is_built
self.depends = depends
def get_backend_from_name(backend: str, build: T.Optional[build.Build] = None, interpreter: T.Optional['Interpreter'] = None) -> T.Optional['Backend']:
@ -830,7 +832,12 @@ class Backend:
extra_paths = []
cmd_args = []
depends = set(t.depends)
if isinstance(exe, build.Target):
depends.add(exe)
for a in unholder(t.cmd_args):
if isinstance(a, build.Target):
depends.add(a)
if isinstance(a, build.BuildTarget):
extra_paths += self.determine_windows_extra_paths(a, [])
if isinstance(a, mesonlib.File):
@ -852,7 +859,8 @@ class Backend:
t.is_parallel, cmd_args, t.env,
t.should_fail, t.timeout, t.workdir,
extra_paths, t.protocol, t.priority,
isinstance(exe, build.Executable))
isinstance(exe, build.Executable),
[x.get_id() for x in depends])
arr.append(ts)
return arr

@ -325,6 +325,7 @@ def get_test_list(testdata) -> T.List[T.Dict[str, T.Union[str, int, T.List[str],
to['is_parallel'] = t.is_parallel
to['priority'] = t.priority
to['protocol'] = str(t.protocol)
to['depends'] = t.depends
result.append(to)
return result

@ -4281,6 +4281,7 @@ recommended as it is not supported on some platforms''')
('suite', list),
('is_parallel', bool),
('protocol', str),
('depends', list),
]
buildoptions_keylist = [
@ -4338,12 +4339,28 @@ recommended as it is not supported on some platforms''')
assertKeyTypes(root_keylist, res)
# Match target ids to input and output files for ease of reference
src_to_id = {}
out_to_id = {}
for i in res['targets']:
print(json.dump(i, sys.stdout))
out_to_id.update({os.path.relpath(out, self.builddir): i['id']
for out in i['filename']})
for group in i['target_sources']:
src_to_id.update({os.path.relpath(src, testdir): i['id']
for src in group['sources']})
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
deps_to_find = {'test case 1': [src_to_id['t1.cpp']],
'test case 2': [src_to_id['t2.cpp'], src_to_id['t3.cpp']],
'benchmark 1': [out_to_id['file2'], src_to_id['t3.cpp']]}
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertEqual(sorted(i['depends']),
sorted(deps_to_find[i['name']]))
self.assertListEqual(tests_to_find, [])
# Check buildoptions
@ -4484,6 +4501,7 @@ recommended as it is not supported on some platforms''')
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
res_wb = [i for i in res_wb if i['type'] != 'custom']
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:

@ -0,0 +1,5 @@
#! /usr/bin/env python3
"""Minimal, portable stand-in for ``cp``: copy one file to another path.

Usage: cp.py SRC DST

Used by the test suite as the command of a custom_target(), so it must
work on any platform with a Python 3 interpreter.
"""
import sys
from shutil import copyfile


def main(argv=None):
    """Copy argv[0] to argv[1].

    :param argv: argument list (defaults to ``sys.argv[1:]``); extra or
        missing arguments raise TypeError from copyfile, which is an
        acceptable failure mode for a test helper.
    """
    args = sys.argv[1:] if argv is None else argv
    copyfile(*args)


# Guard the entry point so importing this module has no side effects
# (the original ran copyfile at import time).
if __name__ == '__main__':
    main()

@ -26,6 +26,9 @@ var1 = '1'
var2 = 2.to_string()
var3 = 'test3'
cus = custom_target('custom target test', output: 'file2', input: 'cp.py',
command: [find_program('cp.py'), '@INPUT@', '@OUTPUT@'])
t1 = executable('test' + var1, ['t1.cpp'], link_with: [sharedlib], install: not false, build_by_default: get_option('test_opt2'))
t2 = executable('test@0@'.format('@0@'.format(var2)), sources: ['t2.cpp'], link_with: [staticlib])
t3 = executable(var3, 't3.cpp', link_with: [sharedlib, staticlib], dependencies: [dep1])
@ -44,8 +47,8 @@ osmesa_lib_name = osmesa_lib_name + osmesa_bits
message(osmesa_lib_name) # Infinite recursion gets triggered here when the parameter osmesa_lib_name is resolved
test('test case 1', t1)
test('test case 2', t2)
benchmark('benchmark 1', t3)
test('test case 2', t2, depends: t3)
benchmark('benchmark 1', t3, args: cus)
### Stuff to test the AST JSON printer
foreach x : ['a', 'b', 'c']

Loading…
Cancel
Save