Merge pull request #9834 from bonzini/test-verbose-kwarg

New keyword argument `verbose` for tests and benchmarks
pull/9512/merge
Jussi Pakkanen 3 years ago committed by GitHub
commit 9a1a5c2b74
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 6
      docs/markdown/snippets/test-verbose.md
  2. 8
      docs/yaml/functions/benchmark.yaml
  3. 7
      docs/yaml/functions/test.yaml
  4. 4
      mesonbuild/backend/backends.py
  5. 4
      mesonbuild/interpreter/interpreter.py
  6. 3
      mesonbuild/interpreter/interpreterobjects.py
  7. 26
      mesonbuild/mtest.py
  8. 2
      test cases/common/206 tap tests/meson.build
  9. 7
      unittests/allplatformstests.py

@ -0,0 +1,6 @@
## New keyword argument `verbose` for tests and benchmarks
The new keyword argument `verbose` can be used to mark tests and benchmarks
that must always be logged verbosely on the console. This is particularly
useful for long-running tests, or when a single Meson test() is wrapping
an external test harness.

@ -104,3 +104,11 @@ kwargs:
The starting order of tests with identical priorities is
implementation-defined. The default priority is 0, negative numbers are
permitted.
verbose:
type: bool
since: 0.62.0
default: false
description: |
if true, forces the test results to be logged as if `--verbose` was passed
to `meson test`.

@ -33,6 +33,13 @@ description: |
test(..., env: nomalloc, ...)
```
In addition to running individual executables as test cases, `test()`
can also be used to invoke an external test harness. In this case,
it is best to use `verbose: true` *(since 0.62.0)* and, if supported
by the external harness, `protocol: 'tap'` *(since 0.50.0)*. This will
ensure that Meson logs each subtest as it runs, instead of including
the whole log at the end of the run.
Defined tests can be run in a backend-agnostic way by calling
`meson test` inside the build dir, or by using backend-specific
commands, such as `ninja test` or `msbuild RUN_TESTS.vcxproj`.

@ -225,6 +225,7 @@ class TestSerialisation:
cmd_is_built: bool cmd_is_built: bool
depends: T.List[str] depends: T.List[str]
version: str version: str
verbose: bool
def __post_init__(self) -> None: def __post_init__(self) -> None:
if self.exe_wrapper is not None: if self.exe_wrapper is not None:
@ -1147,7 +1148,8 @@ class Backend:
extra_paths, t.protocol, t.priority, extra_paths, t.protocol, t.priority,
isinstance(exe, build.Executable), isinstance(exe, build.Executable),
[x.get_id() for x in depends], [x.get_id() for x in depends],
self.environment.coredata.version) self.environment.coredata.version,
t.verbose)
arr.append(ts) arr.append(ts)
return arr return arr

@ -215,6 +215,7 @@ TEST_KWARGS: T.List[KwargInfo] = [
ENV_KW, ENV_KW,
DEPENDS_KW.evolve(since='0.46.0'), DEPENDS_KW.evolve(since='0.46.0'),
KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string KwargInfo('suite', ContainerTypeInfo(list, str), listify=True, default=['']), # yes, a list of empty string
KwargInfo('verbose', bool, default=False, since='0.62.0'),
] ]
permitted_dependency_kwargs = { permitted_dependency_kwargs = {
@ -1972,7 +1973,8 @@ external dependencies (including libraries) must go to "dependencies".''')
kwargs['timeout'], kwargs['timeout'],
kwargs['workdir'], kwargs['workdir'],
kwargs['protocol'], kwargs['protocol'],
kwargs['priority']) kwargs['priority'],
kwargs['verbose'])
def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool): def add_test(self, node: mparser.BaseNode, args: T.List, kwargs: T.Dict[str, T.Any], is_base_test: bool):
t = self.make_test(node, args, kwargs) t = self.make_test(node, args, kwargs)

@ -636,7 +636,7 @@ class Test(MesonInterpreterObject):
cmd_args: T.List[T.Union[str, mesonlib.File, build.Target]], cmd_args: T.List[T.Union[str, mesonlib.File, build.Target]],
env: build.EnvironmentVariables, env: build.EnvironmentVariables,
should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str, should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str,
priority: int): priority: int, verbose: bool):
super().__init__() super().__init__()
self.name = name self.name = name
self.suite = listify(suite) self.suite = listify(suite)
@ -651,6 +651,7 @@ class Test(MesonInterpreterObject):
self.workdir = workdir self.workdir = workdir
self.protocol = TestProtocol.from_str(protocol) self.protocol = TestProtocol.from_str(protocol)
self.priority = priority self.priority = priority
self.verbose = verbose
def get_exe(self) -> T.Union[ExternalProgram, build.Executable, build.CustomTarget]: def get_exe(self) -> T.Union[ExternalProgram, build.Executable, build.CustomTarget]:
return self.exe return self.exe

@ -600,7 +600,7 @@ class ConsoleLogger(TestLogger):
self.progress_task = asyncio.ensure_future(report_progress()) self.progress_task = asyncio.ensure_future(report_progress())
def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None: def start_test(self, harness: 'TestHarness', test: 'TestRun') -> None:
if harness.options.verbose and test.cmdline: if test.verbose and test.cmdline:
self.flush() self.flush()
print(harness.format(test, mlog.colorize_console(), print(harness.format(test, mlog.colorize_console(),
max_left_width=self.max_left_width, max_left_width=self.max_left_width,
@ -619,12 +619,12 @@ class ConsoleLogger(TestLogger):
self.request_update() self.request_update()
def shorten_log(self, harness: 'TestHarness', result: 'TestRun') -> str: def shorten_log(self, harness: 'TestHarness', result: 'TestRun') -> str:
if not harness.options.verbose and not harness.options.print_errorlogs: if not result.verbose and not harness.options.print_errorlogs:
return '' return ''
log = result.get_log(mlog.colorize_console(), log = result.get_log(mlog.colorize_console(),
stderr_only=result.needs_parsing) stderr_only=result.needs_parsing)
if harness.options.verbose: if result.verbose:
return log return log
lines = log.splitlines() lines = log.splitlines()
@ -634,7 +634,7 @@ class ConsoleLogger(TestLogger):
return str(mlog.bold('Listing only the last 100 lines from a long log.\n')) + '\n'.join(lines[-100:]) return str(mlog.bold('Listing only the last 100 lines from a long log.\n')) + '\n'.join(lines[-100:])
def print_log(self, harness: 'TestHarness', result: 'TestRun') -> None: def print_log(self, harness: 'TestHarness', result: 'TestRun') -> None:
if not harness.options.verbose: if not result.verbose:
cmdline = result.cmdline cmdline = result.cmdline
if not cmdline: if not cmdline:
print(result.res.get_command_marker() + result.stdo) print(result.res.get_command_marker() + result.stdo)
@ -648,7 +648,7 @@ class ConsoleLogger(TestLogger):
print(self.output_end) print(self.output_end)
def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, result: TestResult) -> None: def log_subtest(self, harness: 'TestHarness', test: 'TestRun', s: str, result: TestResult) -> None:
if harness.options.verbose or (harness.options.print_errorlogs and result.is_bad()): if test.verbose or (harness.options.print_errorlogs and result.is_bad()):
self.flush() self.flush()
print(harness.format(test, mlog.colorize_console(), max_left_width=self.max_left_width, print(harness.format(test, mlog.colorize_console(), max_left_width=self.max_left_width,
prefix=self.sub, prefix=self.sub,
@ -659,22 +659,22 @@ class ConsoleLogger(TestLogger):
def log(self, harness: 'TestHarness', result: 'TestRun') -> None: def log(self, harness: 'TestHarness', result: 'TestRun') -> None:
self.running_tests.remove(result) self.running_tests.remove(result)
if result.res is TestResult.TIMEOUT and harness.options.verbose: if result.res is TestResult.TIMEOUT and result.verbose:
self.flush() self.flush()
print(f'{result.name} time out (After {result.timeout} seconds)') print(f'{result.name} time out (After {result.timeout} seconds)')
if not harness.options.quiet or not result.res.is_ok(): if not harness.options.quiet or not result.res.is_ok():
self.flush() self.flush()
if harness.options.verbose and not result.is_parallel and result.cmdline: if result.verbose and not result.is_parallel and result.cmdline:
if not result.needs_parsing: if not result.needs_parsing:
print(self.output_end) print(self.output_end)
print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width)) print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width))
else: else:
print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width), print(harness.format(result, mlog.colorize_console(), max_left_width=self.max_left_width),
flush=True) flush=True)
if harness.options.verbose or result.res.is_bad(): if result.verbose or result.res.is_bad():
self.print_log(harness, result) self.print_log(harness, result)
if harness.options.verbose or result.res.is_bad(): if result.verbose or result.res.is_bad():
print(flush=True) print(flush=True)
self.request_update() self.request_update()
@ -867,7 +867,7 @@ class TestRun:
return super().__new__(TestRun.PROTOCOL_TO_CLASS[test.protocol]) return super().__new__(TestRun.PROTOCOL_TO_CLASS[test.protocol])
def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str], def __init__(self, test: TestSerialisation, test_env: T.Dict[str, str],
name: str, timeout: T.Optional[int], is_parallel: bool): name: str, timeout: T.Optional[int], is_parallel: bool, verbose: bool):
self.res = TestResult.PENDING self.res = TestResult.PENDING
self.test = test self.test = test
self._num = None # type: T.Optional[int] self._num = None # type: T.Optional[int]
@ -885,6 +885,7 @@ class TestRun:
self.project = test.project_name self.project = test.project_name
self.junit = None # type: T.Optional[et.ElementTree] self.junit = None # type: T.Optional[et.ElementTree]
self.is_parallel = is_parallel self.is_parallel = is_parallel
self.verbose = verbose
def start(self, cmd: T.List[str]) -> None: def start(self, cmd: T.List[str]) -> None:
self.res = TestResult.RUNNING self.res = TestResult.RUNNING
@ -1335,11 +1336,12 @@ class SingleTestRunner:
timeout = self.test.timeout * self.options.timeout_multiplier timeout = self.test.timeout * self.options.timeout_multiplier
is_parallel = test.is_parallel and self.options.num_processes > 1 and not self.options.gdb is_parallel = test.is_parallel and self.options.num_processes > 1 and not self.options.gdb
self.runobj = TestRun(test, env, name, timeout, is_parallel) verbose = (test.verbose or self.options.verbose) and not self.options.quiet
self.runobj = TestRun(test, env, name, timeout, is_parallel, verbose)
if self.options.gdb: if self.options.gdb:
self.console_mode = ConsoleUser.GDB self.console_mode = ConsoleUser.GDB
elif self.options.verbose and not is_parallel and not self.runobj.needs_parsing: elif self.runobj.verbose and not is_parallel and not self.runobj.needs_parsing:
self.console_mode = ConsoleUser.STDOUT self.console_mode = ConsoleUser.STDOUT
else: else:
self.console_mode = ConsoleUser.LOGGER self.console_mode = ConsoleUser.LOGGER

@ -7,7 +7,7 @@ test('fail', tester, args : ['not ok'], should_fail: true, protocol: 'tap')
test('xfail', tester, args : ['not ok # todo'], protocol: 'tap') test('xfail', tester, args : ['not ok # todo'], protocol: 'tap')
test('xpass', tester, args : ['ok # todo'], should_fail: true, protocol: 'tap') test('xpass', tester, args : ['ok # todo'], should_fail: true, protocol: 'tap')
test('skip', tester, args : ['ok # skip'], protocol: 'tap') test('skip', tester, args : ['ok # skip'], protocol: 'tap')
test('partially skipped', tester, args : ['ok 1\nok 2 # skip'], protocol: 'tap') test('partially skipped', tester, args : ['ok 1\nok 2 # skip'], suite: ['verbose'], protocol: 'tap', verbose: true)
test('partially skipped (real-world example)', cat, args : [files('issue7515.txt')], protocol: 'tap') test('partially skipped (real-world example)', cat, args : [files('issue7515.txt')], protocol: 'tap')
test('skip comment', tester, args : ['ok # Skipped: with a comment'], protocol: 'tap') test('skip comment', tester, args : ['ok # Skipped: with a comment'], protocol: 'tap')
test('skip failure', tester, args : ['not ok # skip'], should_fail: true, protocol: 'tap') test('skip failure', tester, args : ['not ok # skip'], should_fail: true, protocol: 'tap')

@ -570,6 +570,13 @@ class AllPlatformTests(BasePlatformTests):
self.build() self.build()
self._run(self.mtest_command + ['--repeat=2']) self._run(self.mtest_command + ['--repeat=2'])
def test_verbose(self):
testdir = os.path.join(self.common_test_dir, '206 tap tests')
self.init(testdir)
self.build()
out = self._run(self.mtest_command + ['--suite', 'verbose'])
self.assertIn('1/1 subtest 1', out)
def test_testsetups(self): def test_testsetups(self):
if not shutil.which('valgrind'): if not shutil.which('valgrind'):
raise SkipTest('Valgrind not installed.') raise SkipTest('Valgrind not installed.')

Loading…
Cancel
Save