tests: Refactored test runner

pull/8859/head
Daniel Mensinger 4 years ago
parent e1708d3de9
commit 25df6e7d16
  1. run_project_tests.py (235 lines changed)

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from concurrent.futures import ProcessPoolExecutor, CancelledError, Future
+from concurrent.futures import ProcessPoolExecutor, CancelledError
 from enum import Enum
 from io import StringIO
 from pathlib import Path, PurePath
@@ -44,7 +44,7 @@ from mesonbuild import mlog
 from mesonbuild import mtest
 from mesonbuild.build import ConfigurationData
 from mesonbuild.mesonlib import MachineChoice, Popen_safe, TemporaryDirectoryWinProof
-from mesonbuild.mlog import bold, green, red, yellow
+from mesonbuild.mlog import blue, bold, cyan, green, red, yellow, normal_green
 from mesonbuild.coredata import backendlist, version as meson_version
 from mesonbuild.mesonmain import setup_vsenv
 from run_tests import get_fake_options, run_configure, get_meson_script
@@ -56,6 +56,7 @@ if T.TYPE_CHECKING:
     from types import FrameType
     from mesonbuild.environment import Environment
     from mesonbuild._typing import Protocol
+    from concurrent.futures import Future

     class CompilerArgumentType(Protocol):
         cross_file: str
@@ -234,10 +235,13 @@ class TestDef:
     def __repr__(self) -> str:
         return '<{}: {:<48} [{}: {}] -- {}>'.format(type(self).__name__, str(self.path), self.name, self.args, self.skip)

-    def display_name(self) -> str:
+    def display_name(self) -> mlog.TV_LoggableList:
+        # Remove the redundant 'test cases' part
+        section, id = self.path.parts[1:3]
+        res: mlog.TV_LoggableList = [f'{section}:', bold(id)]
         if self.name:
-            return f'{self.path.as_posix()} ({self.name})'
-        return self.path.as_posix()
+            res += [f' ({self.name})']
+        return res

     def __lt__(self, other: object) -> bool:
         if isinstance(other, TestDef):
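
For context on the new display_name(): a project test path looks like 'test cases/<section>/<id and name>', so slicing parts[1:3] keeps only the section and the test id. A quick standalone illustration of that slicing, with a hypothetical path not taken from the commit:

import typing as T
from pathlib import Path

# Hypothetical project test path: 'test cases/<section>/<id and name>'
path = Path('test cases/frameworks/1 boost')

# parts[0] is the redundant 'test cases' prefix; keep section and id only.
section, test_id = path.parts[1:3]

print(f'{section}:', test_id)  # prints: frameworks: 1 boost
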
@@ -1076,6 +1080,49 @@ def run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
     with open(txtname, 'w', encoding='utf-8', errors='ignore') as lf:
         return _run_tests(all_tests, log_name_base, failfast, extra_args, use_tmp, lf)

+class TestStatus(Enum):
+    OK = normal_green(' [SUCCESS] ')
+    SKIP = yellow(' [SKIPPED] ')
+    ERROR = red(' [ERROR] ')
+    CANCELED = cyan('[CANCELED] ')
+    RUNNING = blue(' [RUNNING] ')  # Should never be actually printed
+    LOG = bold(' [LOG] ')  # Should never be actually printed
+
+class TestRunFuture:
+    def __init__(self, name: str, testdef: TestDef, future: 'Future[T.Optional[TestResult]]') -> None:
+        super().__init__()
+        self.name = name
+        self.testdef = testdef
+        self.future = future
+        self.status = TestStatus.RUNNING
+
+    @property
+    def result(self) -> T.Optional[TestResult]:
+        return self.future.result()
+
+    def log(self) -> None:
+        without_install = '' if install_commands else '(without install)'
+        print(self.status.value, without_install, *self.testdef.display_name())
+
+    def update_log(self, new_status: TestStatus) -> None:
+        self.status = new_status
+        self.log()
+
+    def cancel(self) -> None:
+        if self.future.cancel():
+            self.status = TestStatus.CANCELED
+
+class LogRunFuture:
+    def __init__(self, msgs: mlog.TV_LoggableList) -> None:
+        self.msgs = msgs
+        self.status = TestStatus.LOG
+
+    def log(self) -> None:
+        print(*self.msgs, sep='')
+
+    def cancel(self) -> None:
+        pass
+
 def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
                log_name_base: str,
                failfast: bool,
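
The two wrapper classes above are the core of the refactor: every suite banner becomes a LogRunFuture and every submitted test a TestRunFuture, so all work is queued up front and the output is replayed strictly in submission order afterwards. A self-contained sketch of that pattern, with hypothetical names (Status, TaskFuture, LogEntry, square) standing in for the real classes and run_test:

import typing as T
from concurrent.futures import Future, ProcessPoolExecutor
from enum import Enum

class Status(Enum):
    RUNNING = '[RUNNING]'
    OK = '[SUCCESS]'

class TaskFuture:
    # Pairs a display name with the executor future and a printable status.
    def __init__(self, name: str, future: 'Future[int]') -> None:
        self.name = name
        self.future = future
        self.status = Status.RUNNING

class LogEntry:
    # A plain message queued alongside the tasks and printed in order.
    def __init__(self, msg: str) -> None:
        self.msg = msg

def square(x: int) -> int:
    return x * x

if __name__ == '__main__':
    items: T.List[T.Union[TaskFuture, LogEntry]] = []
    with ProcessPoolExecutor(max_workers=2) as executor:
        # Phase 1: queue a banner and submit all work up front.
        items.append(LogEntry('Running demo tasks.'))
        for i in range(4):
            items.append(TaskFuture(f'task {i}', executor.submit(square, i)))
        # Phase 2: drain in submission order; log entries print immediately,
        # task entries block until their own result is ready.
        for item in items:
            if isinstance(item, LogEntry):
                print(item.msg)
                continue
            item.status = Status.OK
            print(item.status.value, item.name, '->', item.future.result())
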
@@ -1108,15 +1155,16 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
         num_workers *= 2
     executor = ProcessPoolExecutor(max_workers=num_workers)

+    futures: T.List[T.Union[TestRunFuture, LogRunFuture]] = []
+
+    # First, collect and start all tests and also queue log messages
     for name, test_cases, skipped in all_tests:
         current_suite = ET.SubElement(junit_root, 'testsuite', {'name': name, 'tests': str(len(test_cases))})
-        print()
         if skipped:
-            print(bold('Not running %s tests.' % name))
+            futures += [LogRunFuture(['\n', bold(f'Not running {name} tests.'), '\n'])]
         else:
-            print(bold('Running %s tests.' % name))
-        print()
-        futures: T.List[T.Tuple[str, TestDef, Future[T.Optional[TestResult]]]] = []
+            futures += [LogRunFuture(['\n', bold(f'Running {name} tests.'), '\n'])]
+
         for t in test_cases:
             # Jenkins screws us over by automatically sorting test cases by name
             # and getting it wrong by not doing logical number sorting.
@@ -1134,76 +1182,100 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
             t.skip = skipped or t.skip
             result_future = executor.submit(run_test, t, extra_args + suite_args + t.args, should_fail, use_tmp)
-            futures.append((testname, t, result_future))
+            futures += [TestRunFuture(testname, t, result_future)]

-        for (testname, t, result_future) in futures:
-            sys.stdout.flush()
-            try:
-                result = result_future.result()
-            except CancelledError:
-                continue
-            if (result is None) or (('MESON_SKIP_TEST' in result.stdo) and (skippable(name, t.path.as_posix()))):
-                print(yellow('Skipping:'), t.display_name())
-                current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
-                                                                         'classname': name})
-                ET.SubElement(current_test, 'skipped', {})
-                skipped_tests += 1
-            else:
-                without_install = "" if len(install_commands) > 0 else " (without install)"
-                if result.msg != '':
-                    print(red(f'Failed test{without_install} during {result.step.name}: {t.display_name()!r}'))
-                    print('Reason:', result.msg)
-                    failing_tests += 1
-                    if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:
-                        # For configure failures, instead of printing stdout,
-                        # print the meson log if available since it's a superset
-                        # of stdout and often has very useful information.
-                        failing_logs.append(result.mlog)
-                    elif under_ci:
-                        # Always print the complete meson log when running in
-                        # a CI. This helps debugging issues that only occur in
-                        # a hard to reproduce environment
-                        failing_logs.append(result.mlog)
-                        failing_logs.append(result.stdo)
-                    else:
-                        failing_logs.append(result.stdo)
-                    for cmd_res in result.cicmds:
-                        failing_logs.append(cmd_res)
-                    failing_logs.append(result.stde)
-                    if failfast:
-                        print("Cancelling the rest of the tests")
-                        for (_, _, res) in futures:
-                            res.cancel()
-                else:
-                    print(f'Succeeded test{without_install}: {t.display_name()}')
-                    passing_tests += 1
-                conf_time += result.conftime
-                build_time += result.buildtime
-                test_time += result.testtime
-                total_time = conf_time + build_time + test_time
-                log_text_file(logfile, t.path, result)
-                current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
-                                                                         'classname': name,
-                                                                         'time': '%.3f' % total_time})
-                if result.msg != '':
-                    ET.SubElement(current_test, 'failure', {'message': result.msg})
-                stdoel = ET.SubElement(current_test, 'system-out')
-                stdoel.text = result.stdo
-                stdeel = ET.SubElement(current_test, 'system-err')
-                stdeel.text = result.stde
-            if stop:
-                print("Aborting..")
-                for f in futures:
-                    f[2].cancel()
-                executor.shutdown()
-                raise StopException()
-            if failfast and failing_tests > 0:
-                break
-
-    print("\nTotal configuration time: %.2fs" % conf_time)
-    print("Total build time: %.2fs" % build_time)
-    print("Total test time: %.2fs" % test_time)
+    # Ensure we only cancel once
+    tests_canceled = False
+
+    # Wait and handle the test results and print the stored log output
+    for f in futures:
+        # Just a log entry to print something to stdout
+        sys.stdout.flush()
+        if isinstance(f, LogRunFuture):
+            f.log()
+            continue
+
+        # Actual Test run
+        testname = f.name
+        t = f.testdef
+        try:
+            result = f.result
+        except (CancelledError, KeyboardInterrupt):
+            f.status = TestStatus.CANCELED
+
+        if stop and not tests_canceled:
+            num_running = sum([1 if f.status is TestStatus.RUNNING else 0 for f in futures])
+            for f2 in futures:
+                f2.cancel()
+            executor.shutdown()
+            num_canceled = sum([1 if f.status is TestStatus.CANCELED else 0 for f in futures])
+            print(f'\nCanceled {num_canceled} out of {num_running} running tests.')
+            print(f'Finishing the remaining {num_running - num_canceled} tests.\n')
+            tests_canceled = True
+
+        # Handle canceled tests
+        if f.status is TestStatus.CANCELED:
+            f.log()
+            continue
+
+        # Handle skipped tests
+        if (result is None) or (('MESON_SKIP_TEST' in result.stdo) and (skippable(name, t.path.as_posix()))):
+            f.update_log(TestStatus.SKIP)
+            current_test = ET.SubElement(current_suite, 'testcase', {'name': testname, 'classname': name})
+            ET.SubElement(current_test, 'skipped', {})
+            skipped_tests += 1
+            continue
+
+        # Handle Failed tests
+        if result.msg != '':
+            f.update_log(TestStatus.ERROR)
+            print(bold('During:'), result.step.name)
+            print(bold('Reason:'), result.msg)
+            failing_tests += 1
+            if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:
+                # For configure failures, instead of printing stdout,
+                # print the meson log if available since it's a superset
+                # of stdout and often has very useful information.
+                failing_logs.append(result.mlog)
+            elif under_ci:
+                # Always print the complete meson log when running in
+                # a CI. This helps debugging issues that only occur in
+                # a hard to reproduce environment
+                failing_logs.append(result.mlog)
+                failing_logs.append(result.stdo)
+            else:
+                failing_logs.append(result.stdo)
+            for cmd_res in result.cicmds:
+                failing_logs.append(cmd_res)
+            failing_logs.append(result.stde)
+            if failfast:
+                print("Cancelling the rest of the tests")
+                for f in futures:
+                    f.cancel()
+        else:
+            f.update_log(TestStatus.OK)
+            passing_tests += 1
+        conf_time += result.conftime
+        build_time += result.buildtime
+        test_time += result.testtime
+        total_time = conf_time + build_time + test_time
+        log_text_file(logfile, t.path, result)
+        current_test = ET.SubElement(
+            current_suite,
+            'testcase',
+            {'name': testname, 'classname': name, 'time': '%.3f' % total_time}
+        )
+        if result.msg != '':
+            ET.SubElement(current_test, 'failure', {'message': result.msg})
+        stdoel = ET.SubElement(current_test, 'system-out')
+        stdoel.text = result.stdo
+        stdeel = ET.SubElement(current_test, 'system-err')
+        stdeel.text = result.stde
+
+    print()
+    print("Total configuration time: %.2fs" % conf_time)
+    print("Total build time: %.2fs" % build_time)
+    print("Total test time: %.2fs" % test_time)

     ET.ElementTree(element=junit_root).write(xmlname, xml_declaration=True, encoding='UTF-8')
     return passing_tests, failing_tests, skipped_tests
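
One detail worth knowing when reading both the failfast branch and the Ctrl-C handling above: concurrent.futures.Future.cancel() only succeeds for work that has not started yet, which is why TestRunFuture.cancel() checks the return value before flipping its status to CANCELED, and why tests that are already running are still awaited and reported. A minimal demonstration of that standard-library behaviour (not from the commit):

import time
from concurrent.futures import ProcessPoolExecutor

def slow(x: int) -> int:
    time.sleep(1)
    return x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as executor:
        first = executor.submit(slow, 1)   # picked up by the single worker
        second = executor.submit(slow, 2)  # stays queued behind it
        while not first.running():         # wait until the worker has started it
            time.sleep(0.01)
        print(first.cancel())   # False: a running future cannot be canceled
        print(second.cancel())  # True: it never started, so it is dropped
        print(first.result())   # 1: the running job still completes normally
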
@@ -1440,8 +1512,9 @@ if __name__ == '__main__':
         (passing_tests, failing_tests, skipped_tests) = run_tests(all_tests, 'meson-test-run', options.failfast, options.extra_args, options.use_tmpdir)
     except StopException:
         pass
-    print('\nTotal passed tests:', green(str(passing_tests)))
-    print('Total failed tests:', red(str(failing_tests)))
+    print()
+    print('Total passed tests: ', green(str(passing_tests)))
+    print('Total failed tests: ', red(str(failing_tests)))
     print('Total skipped tests:', yellow(str(skipped_tests)))
     if failing_tests > 0:
         print('\nMesonlogs of failing tests\n')
