tests: Refactored test runner

Branch: pull/8859/head
Author: Daniel Mensinger
Parent: e1708d3de9
Commit: 25df6e7d16
1 changed file: run_project_tests.py (151 lines changed)

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from concurrent.futures import ProcessPoolExecutor, CancelledError, Future
+from concurrent.futures import ProcessPoolExecutor, CancelledError
 from enum import Enum
 from io import StringIO
 from pathlib import Path, PurePath
@@ -44,7 +44,7 @@ from mesonbuild import mlog
 from mesonbuild import mtest
 from mesonbuild.build import ConfigurationData
 from mesonbuild.mesonlib import MachineChoice, Popen_safe, TemporaryDirectoryWinProof
-from mesonbuild.mlog import bold, green, red, yellow
+from mesonbuild.mlog import blue, bold, cyan, green, red, yellow, normal_green
 from mesonbuild.coredata import backendlist, version as meson_version
 from mesonbuild.mesonmain import setup_vsenv
 from run_tests import get_fake_options, run_configure, get_meson_script
@@ -56,6 +56,7 @@ if T.TYPE_CHECKING:
     from types import FrameType
     from mesonbuild.environment import Environment
     from mesonbuild._typing import Protocol
+    from concurrent.futures import Future

 class CompilerArgumentType(Protocol):
     cross_file: str
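Moving the `Future` import under the `if T.TYPE_CHECKING:` guard works because the name is now used only in annotations. A minimal standalone sketch of the pattern (not part of the commit; `describe` is a made-up function):

```python
import typing as T

if T.TYPE_CHECKING:
    # Only evaluated by type checkers, never at runtime.
    from concurrent.futures import Future

def describe(fut: 'Future[int]') -> str:
    # The quoted annotation keeps this working even though
    # Future is not imported at runtime.
    return 'done' if fut.done() else 'pending'
```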
@@ -234,10 +235,13 @@ class TestDef:
     def __repr__(self) -> str:
         return '<{}: {:<48} [{}: {}] -- {}>'.format(type(self).__name__, str(self.path), self.name, self.args, self.skip)

-    def display_name(self) -> str:
-        if self.name:
-            return f'{self.path.as_posix()} ({self.name})'
-        return self.path.as_posix()
+    def display_name(self) -> mlog.TV_LoggableList:
+        # Remove the redundant 'test cases' part
+        section, id = self.path.parts[1:3]
+        res: mlog.TV_LoggableList = [f'{section}:', bold(id)]
+        if self.name:
+            res += [f' ({self.name})']
+        return res

     def __lt__(self, other: object) -> bool:
         if isinstance(other, TestDef):
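`display_name` now returns a list of loggable pieces rather than a flat string, letting callers splat it into `print` with per-piece styling, and `parts[1:3]` drops the leading 'test cases' directory. A rough illustration with a hypothetical test path:

```python
from pathlib import Path

# Hypothetical path following the 'test cases/<section>/<id>' layout.
path = Path('test cases/common/1 trivial')
section, test_id = path.parts[1:3]
print([f'{section}:', test_id])  # -> ['common:', '1 trivial']
```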
@@ -1076,6 +1080,49 @@ def run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
     with open(txtname, 'w', encoding='utf-8', errors='ignore') as lf:
         return _run_tests(all_tests, log_name_base, failfast, extra_args, use_tmp, lf)

+class TestStatus(Enum):
+    OK = normal_green(' [SUCCESS] ')
+    SKIP = yellow(' [SKIPPED] ')
+    ERROR = red('   [ERROR] ')
+    CANCELED = cyan('[CANCELED] ')
+    RUNNING = blue(' [RUNNING] ')  # Should never be actually printed
+    LOG = bold('     [LOG] ')      # Should never be actually printed
+
+class TestRunFuture:
+    def __init__(self, name: str, testdef: TestDef, future: 'Future[T.Optional[TestResult]]') -> None:
+        super().__init__()
+        self.name = name
+        self.testdef = testdef
+        self.future = future
+        self.status = TestStatus.RUNNING
+
+    @property
+    def result(self) -> T.Optional[TestResult]:
+        return self.future.result()
+
+    def log(self) -> None:
+        without_install = '' if install_commands else '(without install)'
+        print(self.status.value, without_install, *self.testdef.display_name())
+
+    def update_log(self, new_status: TestStatus) -> None:
+        self.status = new_status
+        self.log()
+
+    def cancel(self) -> None:
+        if self.future.cancel():
+            self.status = TestStatus.CANCELED
+
+class LogRunFuture:
+    def __init__(self, msgs: mlog.TV_LoggableList) -> None:
+        self.msgs = msgs
+        self.status = TestStatus.LOG
+
+    def log(self) -> None:
+        print(*self.msgs, sep='')
+
+    def cancel(self) -> None:
+        pass
+
 def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
                log_name_base: str,
                failfast: bool,
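The wrappers above give every queued item a uniform `status`/`log()`/`cancel()` surface, whether it is a real test future or a deferred log message. A condensed, self-contained sketch of the same idea (`Status`, `RunFuture`, and `square` are stand-in names, not Meson code):

```python
from concurrent.futures import ProcessPoolExecutor
from enum import Enum

class Status(Enum):  # stand-in for TestStatus
    RUNNING = '[RUNNING]'
    OK = '[SUCCESS]'
    CANCELED = '[CANCELED]'

class RunFuture:  # stand-in for TestRunFuture
    def __init__(self, name: str, future) -> None:
        self.name = name
        self.future = future
        self.status = Status.RUNNING

    def cancel(self) -> None:
        # Future.cancel() returns True only for jobs not yet started.
        if self.future.cancel():
            self.status = Status.CANCELED

def square(x: int) -> int:
    return x * x

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as ex:
        jobs = [RunFuture(f'job{i}', ex.submit(square, i)) for i in range(4)]
        for job in jobs:
            print(Status.OK.value, job.name, job.future.result())
```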
@@ -1108,15 +1155,16 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
         num_workers *= 2
     executor = ProcessPoolExecutor(max_workers=num_workers)

+    futures: T.List[T.Union[TestRunFuture, LogRunFuture]] = []
+
+    # First, collect and start all tests and also queue log messages
     for name, test_cases, skipped in all_tests:
         current_suite = ET.SubElement(junit_root, 'testsuite', {'name': name, 'tests': str(len(test_cases))})
-        print()
         if skipped:
-            print(bold('Not running %s tests.' % name))
+            futures += [LogRunFuture(['\n', bold(f'Not running {name} tests.'), '\n'])]
         else:
-            print(bold('Running %s tests.' % name))
-        print()
-        futures: T.List[T.Tuple[str, TestDef, Future[T.Optional[TestResult]]]] = []
+            futures += [LogRunFuture(['\n', bold(f'Running {name} tests.'), '\n'])]
+
         for t in test_cases:
             # Jenkins screws us over by automatically sorting test cases by name
             # and getting it wrong by not doing logical number sorting.
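Since the submit loop no longer prints, the suite banners are queued as `LogRunFuture` entries in the same list and replayed by the results loop, keeping output in submission order. A toy model of that interleaving (all names hypothetical):

```python
class LogEntry:  # stand-in for LogRunFuture
    def __init__(self, msg: str) -> None:
        self.msg = msg

    def log(self) -> None:
        print(self.msg)

# Banners and results share one queue, in submission order, so the
# single results loop replays each banner right before its suite.
queue = [LogEntry('Running common tests.'), 'common result 1',
         'common result 2', LogEntry('Running native tests.'),
         'native result 1']
for item in queue:
    if isinstance(item, LogEntry):
        item.log()
    else:
        print(' [SUCCESS]', item)
```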
@@ -1134,24 +1182,55 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
             t.skip = skipped or t.skip
             result_future = executor.submit(run_test, t, extra_args + suite_args + t.args, should_fail, use_tmp)
-            futures.append((testname, t, result_future))
-        for (testname, t, result_future) in futures:
+            futures += [TestRunFuture(testname, t, result_future)]
+
+    # Ensure we only cancel once
+    tests_canceled = False
+
+    # Wait and handle the test results and print the stored log output
+    for f in futures:
+        # Just a log entry to print something to stdout
         sys.stdout.flush()
+        if isinstance(f, LogRunFuture):
+            f.log()
+            continue
+
+        # Actual test run
+        testname = f.name
+        t = f.testdef
         try:
-            result = result_future.result()
-        except CancelledError:
+            result = f.result
+        except (CancelledError, KeyboardInterrupt):
+            f.status = TestStatus.CANCELED
+
+        if stop and not tests_canceled:
+            num_running = sum([1 if f2.status is TestStatus.RUNNING else 0 for f2 in futures])
+            for f2 in futures:
+                f2.cancel()
+            executor.shutdown()
+            num_canceled = sum([1 if f2.status is TestStatus.CANCELED else 0 for f2 in futures])
+            print(f'\nCanceled {num_canceled} out of {num_running} running tests.')
+            print(f'Finishing the remaining {num_running - num_canceled} tests.\n')
+            tests_canceled = True
+
+        # Handle canceled tests
+        if f.status is TestStatus.CANCELED:
+            f.log()
             continue
+
+        # Handle skipped tests
         if (result is None) or (('MESON_SKIP_TEST' in result.stdo) and (skippable(name, t.path.as_posix()))):
-            print(yellow('Skipping:'), t.display_name())
-            current_test = ET.SubElement(current_suite, 'testcase', {'name': testname,
-                                                                     'classname': name})
+            f.update_log(TestStatus.SKIP)
+            current_test = ET.SubElement(current_suite, 'testcase', {'name': testname, 'classname': name})
             ET.SubElement(current_test, 'skipped', {})
             skipped_tests += 1
-        else:
-            without_install = "" if len(install_commands) > 0 else " (without install)"
+            continue
+
+        # Handle failed tests
         if result.msg != '':
-            print(red(f'Failed test{without_install} during {result.step.name}: {t.display_name()!r}'))
-            print('Reason:', result.msg)
+            f.update_log(TestStatus.ERROR)
+            print(bold('During:'), result.step.name)
+            print(bold('Reason:'), result.msg)
             failing_tests += 1
             if result.step == BuildStep.configure and result.mlog != no_meson_log_msg:
                 # For configure failures, instead of printing stdout,
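On the first Ctrl-C (or cancellation) the loop above cancels everything still pending exactly once, then keeps draining the futures that had already started; `Future.cancel()` only succeeds for jobs no worker has picked up yet, which is why some tests still run to completion. A standalone demonstration of that semantics (the slow `work` job is illustrative):

```python
import time
from concurrent.futures import ProcessPoolExecutor, CancelledError

def work(n: int) -> int:
    time.sleep(0.2)
    return n

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=1) as ex:
        futs = [ex.submit(work, i) for i in range(5)]
        # Only jobs no worker has picked up yet can be canceled;
        # anything already running finishes normally.
        canceled = sum(f.cancel() for f in futs)
        print(f'canceled {canceled} of {len(futs)} jobs')
        for f in futs:
            try:
                print('result:', f.result())
            except CancelledError:
                print('canceled')
```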
@ -1171,19 +1250,21 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
failing_logs.append(result.stde) failing_logs.append(result.stde)
if failfast: if failfast:
print("Cancelling the rest of the tests") print("Cancelling the rest of the tests")
for (_, _, res) in futures: for f in futures:
res.cancel() f.cancel()
else: else:
print(f'Succeeded test{without_install}: {t.display_name()}') f.update_log(TestStatus.OK)
passing_tests += 1 passing_tests += 1
conf_time += result.conftime conf_time += result.conftime
build_time += result.buildtime build_time += result.buildtime
test_time += result.testtime test_time += result.testtime
total_time = conf_time + build_time + test_time total_time = conf_time + build_time + test_time
log_text_file(logfile, t.path, result) log_text_file(logfile, t.path, result)
current_test = ET.SubElement(current_suite, 'testcase', {'name': testname, current_test = ET.SubElement(
'classname': name, current_suite,
'time': '%.3f' % total_time}) 'testcase',
{'name': testname, 'classname': name, 'time': '%.3f' % total_time}
)
if result.msg != '': if result.msg != '':
ET.SubElement(current_test, 'failure', {'message': result.msg}) ET.SubElement(current_test, 'failure', {'message': result.msg})
stdoel = ET.SubElement(current_test, 'system-out') stdoel = ET.SubElement(current_test, 'system-out')
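The reflowed `ET.SubElement` call still produces the usual JUnit shape. For reference, a self-contained sketch of the same structure with made-up suite and test names:

```python
import xml.etree.ElementTree as ET

junit_root = ET.Element('testsuites')
suite = ET.SubElement(junit_root, 'testsuite',
                      {'name': 'common', 'tests': '1'})
case = ET.SubElement(
    suite,
    'testcase',
    {'name': '1 trivial', 'classname': 'common', 'time': '%.3f' % 0.123}
)
ET.SubElement(case, 'system-out').text = 'captured stdout'
print(ET.tostring(junit_root, encoding='unicode'))
```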
@@ -1191,17 +1272,8 @@ def _run_tests(all_tests: T.List[T.Tuple[str, T.List[TestDef], bool]],
         stdeel = ET.SubElement(current_test, 'system-err')
         stdeel.text = result.stde

-        if stop:
-            print("Aborting..")
-            for f in futures:
-                f[2].cancel()
-            executor.shutdown()
-            raise StopException()
-        if failfast and failing_tests > 0:
-            break
-
-    print("\nTotal configuration time: %.2fs" % conf_time)
+    print()
+    print("Total configuration time: %.2fs" % conf_time)
     print("Total build time: %.2fs" % build_time)
     print("Total test time: %.2fs" % test_time)
     ET.ElementTree(element=junit_root).write(xmlname, xml_declaration=True, encoding='UTF-8')
@@ -1440,7 +1512,8 @@ if __name__ == '__main__':
         (passing_tests, failing_tests, skipped_tests) = run_tests(all_tests, 'meson-test-run', options.failfast, options.extra_args, options.use_tmpdir)
     except StopException:
         pass
-    print('\nTotal passed tests:', green(str(passing_tests)))
+    print()
+    print('Total passed tests: ', green(str(passing_tests)))
     print('Total failed tests: ', red(str(failing_tests)))
     print('Total skipped tests:', yellow(str(skipped_tests)))
     if failing_tests > 0:
