Merge pull request #730 from mesonbuild/newtest

New testing tool
Jussi Pakkanen, committed via GitHub (commit aa9668a2fc)
Changed files (lines changed):
  1. mesonbuild/backend/backends.py (5)
  2. mesonbuild/backend/ninjabackend.py (47)
  3. mesonbuild/coredata.py (2)
  4. mesonbuild/environment.py (6)
  5. mesonbuild/interpreter.py (13)
  6. mesonbuild/mesonmain.py (6)
  7. mesonbuild/scripts/meson_benchmark.py (99)
  8. mesonbuild/scripts/meson_test.py (290)
  9. mesontest.py (375)
  10. run_project_tests.py (6)

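In short: the `--internal test` and `--internal benchmark` script dispatch goes away, and both jobs move into a single standalone runner, mesontest.py, which the ninja targets invoke directly. A minimal sketch of the two in-process entry points, exactly as run_project_tests.py now calls them (must be run from a configured build directory):

    import mesontest

    rc_tests = mesontest.run([])  # full test run; reads meson-private/meson_test_setup.dat
    rc_bench = mesontest.run(['--benchmark', '--logbase', 'benchmarklog'])
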
mesonbuild/backend/backends.py:
@@ -50,7 +50,7 @@ class ExecutableSerialisation():
class TestSerialisation:
def __init__(self, name, suite, fname, is_cross, exe_wrapper, is_parallel, cmd_args, env,
should_fail, valgrind_args, timeout, workdir, extra_paths):
should_fail, timeout, workdir, extra_paths):
self.name = name
self.suite = suite
self.fname = fname
@@ -60,7 +60,6 @@ class TestSerialisation:
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.valgrind_args = valgrind_args
self.timeout = timeout
self.workdir = workdir
self.extra_paths = extra_paths
@@ -443,7 +442,7 @@ class Backend():
a = os.path.join(self.environment.get_build_dir(), a.rel_to_builddir(self.build_to_src))
cmd_args.append(a)
ts = TestSerialisation(t.get_name(), t.suite, fname, is_cross, exe_wrapper,
t.is_parallel, cmd_args, t.env, t.should_fail, t.valgrind_args,
t.is_parallel, cmd_args, t.env, t.should_fail,
t.timeout, t.workdir, extra_paths)
arr.append(ts)
pickle.dump(arr, datafile)

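The producer side of the handshake is unchanged apart from the dropped valgrind_args field: the backend pickles a list of TestSerialisation records and the runner unpickles them. A consumer-side sketch, assuming the datafile names that TestHarness uses further down:

    import pickle

    # benchmarks use meson-private/meson_benchmark_setup.dat instead
    with open('meson-private/meson_test_setup.dat', 'rb') as f:
        tests = pickle.load(f)  # list of TestSerialisation objects
    print([t.name for t in tests])
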
mesonbuild/backend/ninjabackend.py:
@@ -142,7 +142,6 @@ class NinjaBackend(backends.Backend):
self.ninja_filename = 'build.ninja'
self.fortran_deps = {}
self.all_outputs = {}
self.valgrind = environment.find_valgrind()
def detect_vs_dep_prefix(self, tempfilename):
'''VS writes its dependency in a locale dependent format.
@@ -708,56 +707,25 @@ int dummy;
dst_dir = os.path.join(self.environment.get_prefix(), sd.install_dir)
d.install_subdirs.append([src_dir, inst_dir, dst_dir])
def write_test_suite_targets(self, cmd, outfile):
suites = {}
for t in self.build.get_tests():
for s in t.suite:
suites[s] = True
suites = list(suites.keys())
suites.sort()
for s in suites:
if s == '':
visible_name = 'for top level tests'
else:
visible_name = s
elem = NinjaBuildElement(self.all_outputs, 'test:' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd + ['--suite=' + s])
elem.add_item('DESC', 'Running test suite %s.' % visible_name)
elem.add_item('pool', 'console')
elem.write(outfile)
if self.valgrind:
velem = NinjaBuildElement(self.all_outputs, 'test-valgrind:' + s, 'CUSTOM_COMMAND', ['all', 'PHONY'])
velem.add_item('COMMAND', cmd + ['--wrapper=' + self.valgrind, '--suite=' + s])
velem.add_item('DESC', 'Running test suite %s under Valgrind.' % visible_name)
velem.add_item('pool', 'console')
velem.write(outfile)
def generate_tests(self, outfile):
(test_data, benchmark_data) = self.serialise_tests()
script_root = self.environment.get_script_dir()
cmd = [ sys.executable, self.environment.get_build_command(), '--internal', 'test' ]
self.serialise_tests()
meson_exe = self.environment.get_build_command()
(base, ext) = os.path.splitext(meson_exe)
test_exe = base + 'test' + ext
cmd = [sys.executable, test_exe]
if not self.environment.coredata.get_builtin_option('stdsplit'):
cmd += ['--no-stdsplit']
if self.environment.coredata.get_builtin_option('errorlogs'):
cmd += ['--print-errorlogs']
cmd += [ test_data ]
elem = NinjaBuildElement(self.all_outputs, 'test', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running all tests.')
elem.add_item('pool', 'console')
elem.write(outfile)
self.write_test_suite_targets(cmd, outfile)
if self.valgrind:
velem = NinjaBuildElement(self.all_outputs, 'test-valgrind', 'CUSTOM_COMMAND', ['all', 'PHONY'])
velem.add_item('COMMAND', cmd + ['--wrapper=' + self.valgrind])
velem.add_item('DESC', 'Running test suite under Valgrind.')
velem.add_item('pool', 'console')
velem.write(outfile)
# And then benchmarks.
cmd = [sys.executable, self.environment.get_build_command(), '--internal', 'benchmark', benchmark_data]
cmd = [sys.executable, test_exe, '--benchmark','--logbase',
'benchmarklog', '--num-processes=1']
elem = NinjaBuildElement(self.all_outputs, 'benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
elem.add_item('COMMAND', cmd)
elem.add_item('DESC', 'Running benchmark suite.')
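Rather than re-invoking meson with --internal test, the backend now derives the runner's path from the meson executable itself, on the assumption that mesontest is installed next to it with the same extension. A sketch of that derivation (the paths are hypothetical):

    import os

    def sibling_test_exe(meson_exe):
        base, ext = os.path.splitext(meson_exe)
        return base + 'test' + ext

    assert sibling_test_exe('/usr/bin/meson.py') == '/usr/bin/mesontest.py'
    assert sibling_test_exe('/usr/bin/meson') == '/usr/bin/mesontest'
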
@@ -804,7 +772,6 @@ int dummy;
def generate_jar_target(self, target, outfile):
fname = target.get_filename()
subdir = target.get_subdir()
outname_rel = os.path.join(self.get_target_dir(target), fname)
src_list = target.get_sources()
class_list = []

mesonbuild/coredata.py:
@@ -241,8 +241,6 @@ forbidden_target_names = {'clean': None,
'all': None,
'test': None,
'test:': None,
'test-valgrind': None,
'test-valgrind:': None,
'benchmark': None,
'install': None,
'build.ninja': None,

mesonbuild/environment.py:
@@ -39,12 +39,6 @@ def find_coverage_tools():
genhtml_exe = None
return (gcovr_exe, lcov_exe, genhtml_exe)
def find_valgrind():
valgrind_exe = 'valgrind'
if not mesonlib.exe_exists([valgrind_exe, '--version']):
valgrind_exe = None
return valgrind_exe
def detect_ninja():
for n in ['ninja', 'ninja-build']:
try:

mesonbuild/interpreter.py:
@@ -610,7 +610,7 @@ class RunTargetHolder(InterpreterObject):
self.held_object = build.RunTarget(name, command, args, dependencies, subdir)
class Test(InterpreterObject):
def __init__(self, name, suite, exe, is_parallel, cmd_args, env, should_fail, valgrind_args, timeout, workdir):
def __init__(self, name, suite, exe, is_parallel, cmd_args, env, should_fail, timeout, workdir):
InterpreterObject.__init__(self)
self.name = name
self.suite = suite
@@ -619,7 +619,6 @@ class Test(InterpreterObject):
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.valgrind_args = valgrind_args
self.timeout = timeout
self.workdir = workdir
@@ -2124,12 +2123,8 @@ requirements use the version keyword argument instead.''')
if ' ' in k:
raise InterpreterException('Env var key must not have spaces in it.')
env[k] = val
valgrind_args = kwargs.get('valgrind_args', [])
if not isinstance(valgrind_args, list):
valgrind_args = [valgrind_args]
for a in valgrind_args:
if not isinstance(a, str):
raise InterpreterException('Valgrind_arg not a string.')
if not isinstance(envlist, list):
envlist = [envlist]
should_fail = kwargs.get('should_fail', False)
if not isinstance(should_fail, bool):
raise InterpreterException('Keyword argument should_fail must be a boolean.')
@@ -2152,7 +2147,7 @@ requirements use the version keyword argument instead.''')
s = '.' + s
newsuite.append(self.subproject.replace(' ', '_').replace('.', '_') + s)
suite = newsuite
t = Test(args[0], suite, args[1].held_object, par, cmd_args, env, should_fail, valgrind_args, timeout, workdir)
t = Test(args[0], suite, args[1].held_object, par, cmd_args, env, should_fail, timeout, workdir)
if is_base_test:
self.build.tests.append(t)
mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')

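The suite renaming above folds the subproject name into a sanitised dotted prefix. A worked sketch with assumed inputs (a subproject named 'sub proj' defining suite 'fast' plus the top-level empty suite):

    subproject = 'sub proj'
    newsuite = []
    for s in ['fast', '']:
        if s != '':
            s = '.' + s
        newsuite.append(subproject.replace(' ', '_').replace('.', '_') + s)
    assert newsuite == ['sub_proj.fast', 'sub_proj']
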
mesonbuild/mesonmain.py:
@@ -192,12 +192,6 @@ def run_script_command(args):
if cmdname == 'exe':
import mesonbuild.scripts.meson_exe as abc
cmdfunc = abc.run
elif cmdname == 'test':
import mesonbuild.scripts.meson_test as abc
cmdfunc = abc.run
elif cmdname == 'benchmark':
import mesonbuild.scripts.meson_benchmark as abc
cmdfunc = abc.run
elif cmdname == 'install':
import mesonbuild.scripts.meson_install as abc
cmdfunc = abc.run

mesonbuild/scripts/meson_benchmark.py (deleted):
@@ -1,99 +0,0 @@
#!/usr/bin/env python3
# Copyright 2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess, sys, os, argparse
import pickle, statistics, json
from . import meson_test
parser = argparse.ArgumentParser()
parser.add_argument('--wd', default=None, dest='wd',
help='directory to cd into before running')
parser.add_argument('args', nargs='+')
def print_stats(numlen, num_tests, name, res, i, duration, stdev):
startpad = ' '*(numlen - len('%d' % (i+1)))
num = '%s%d/%d' % (startpad, i+1, num_tests)
padding1 = ' '*(38-len(name))
padding2 = ' '*(8-len(res))
result_str = '%s %s %s%s%s%5.5f s +- %5.5f s' % \
(num, name, padding1, res, padding2, duration, stdev)
print(result_str)
# write_json_log(jsonlogfile, name, result)
def print_json_log(jsonlogfile, rawruns, test_name, i):
jsonobj = {'name' : test_name}
runs = []
for r in rawruns:
runobj = {'duration': r.duration,
'stdout': r.stdo,
'returncode': r.returncode}
if r.stde:
runobj['stderr'] = r.stde
runs.append(runobj)
jsonobj['runs'] = runs
jsonlogfile.write(json.dumps(jsonobj) + '\n')
jsonlogfile.flush()
def run_benchmarks(options, datafile):
failed_tests = 0
logfile_base = 'meson-logs/benchmarklog'
jsonlogfilename = logfile_base+ '.json'
with open(datafile, 'rb') as f:
tests = pickle.load(f)
num_tests = len(tests)
if num_tests == 0:
print('No benchmarks defined.')
return 0
iteration_count = 5
wrap = [] # Benchmarks on cross builds are pointless so don't support them.
with open(jsonlogfilename, 'w') as jsonlogfile:
for i, test in enumerate(tests):
runs = []
durations = []
failed = False
for _ in range(iteration_count):
res = meson_test.run_single_test(wrap, test)
runs.append(res)
durations.append(res.duration)
if res.returncode != 0:
failed = True
mean = statistics.mean(durations)
stddev = statistics.stdev(durations)
if failed:
resultstr = 'FAIL'
failed_tests += 1
else:
resultstr = 'OK'
print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
print_json_log(jsonlogfile, runs, test.name, i)
print('\nFull log written to meson-logs/benchmarklog.json.')
return failed_tests
def run(args):
global failed_tests
options = parser.parse_args(args)
if len(options.args) != 1:
print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
print('%s [data file]' % sys.argv[0])
if options.wd is not None:
os.chdir(options.wd)
datafile = options.args[0]
returncode = run_benchmarks(options, datafile)
return returncode
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))

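meson_benchmark.py is gone as a separate script; benchmarking is now just the same harness with --benchmark, which forces a single worker process and logs under benchmarklog. A sketch mirroring the ninja 'benchmark' target built in generate_tests() above:

    import mesontest

    rc = mesontest.run(['--benchmark', '--logbase', 'benchmarklog',
                        '--num-processes=1'])
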
mesonbuild/scripts/meson_test.py (deleted):
@@ -1,290 +0,0 @@
#!/usr/bin/env python3
# Copyright 2013-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mesonbuild
from .. import build
import sys, os, subprocess, time, datetime, pickle, multiprocessing, json
import concurrent.futures as conc
import argparse
import platform
import signal
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
collected_logs = []
error_count = 0
options = None
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
help='wrapper to run tests with (e.g. valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
help='Only run tests belonging to this suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
help='Do not split stderr and stdout in test logs.')
parser.add_argument('--print-errorlogs', default=False, action='store_true',
help="Whether to print faling tests' logs.")
parser.add_argument('args', nargs='+')
class TestRun():
def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
env):
self.res = res
self.returncode = returncode
self.duration = duration
self.stdo = stdo
self.stde = stde
self.cmd = cmd
self.env = env
self.should_fail = should_fail
def get_log(self):
res = '--- command ---\n'
if self.cmd is None:
res += 'NONE\n'
else:
res += "\n%s %s\n" %(' '.join(
["%s='%s'" % (k, v) for k, v in self.env.items()]),
' ' .join(self.cmd))
if self.stdo:
res += '--- stdout ---\n'
res += self.stdo
if self.stde:
if res[-1:] != '\n':
res += '\n'
res += '--- stderr ---\n'
res += self.stde
if res[-1:] != '\n':
res += '\n'
res += '-------\n\n'
return res
def decode(stream):
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')
def write_json_log(jsonlogfile, test_name, result):
jresult = {'name' : test_name,
'stdout' : result.stdo,
'result' : result.res,
'duration' : result.duration,
'returncode' : result.returncode,
'command' : result.cmd,
'env' : result.env}
if result.stde:
jresult['stderr'] = result.stde
jsonlogfile.write(json.dumps(jresult) + '\n')
def run_with_mono(fname):
if fname.endswith('.exe') and not is_windows():
return True
return False
def run_single_test(wrap, test):
global options
if test.fname[0].endswith('.jar'):
cmd = ['java', '-jar'] + test.fname
elif not test.is_cross and run_with_mono(test.fname[0]):
cmd = ['mono'] + test.fname
else:
if test.is_cross:
if test.exe_runner is None:
# Can not run test on cross compiled executable
# because there is no execute wrapper.
cmd = None
else:
cmd = [test.exe_runner] + test.fname
else:
cmd = test.fname
if cmd is None:
res = 'SKIP'
duration = 0.0
stdo = 'Not run because can not execute cross compiled binaries.'
stde = None
returncode = -1
else:
if len(wrap) > 0 and 'valgrind' in wrap[0]:
cmd = wrap + test.valgrind_args + cmd + test.cmd_args
else:
cmd = wrap + cmd + test.cmd_args
starttime = time.time()
child_env = os.environ.copy()
if isinstance(test.env, build.EnvironmentVariables):
test.env = test.env.get_env(child_env)
child_env.update(test.env)
if len(test.extra_paths) > 0:
child_env['PATH'] = (child_env['PATH'] +
os.pathsep.join([''] + test.extra_paths))
if is_windows():
setsid = None
else:
setsid = os.setsid
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE if options and options.split else subprocess.STDOUT,
env=child_env,
cwd=test.workdir,
preexec_fn=setsid)
timed_out = False
try:
(stdo, stde) = p.communicate(timeout=test.timeout)
except subprocess.TimeoutExpired:
timed_out = True
# Python does not provide multiplatform support for
# killing a process and all its children so we need
# to roll our own.
if is_windows():
subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
else:
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
(stdo, stde) = p.communicate()
endtime = time.time()
duration = endtime - starttime
stdo = decode(stdo)
if stde:
stde = decode(stde)
if timed_out:
res = 'TIMEOUT'
elif (not test.should_fail and p.returncode == 0) or \
(test.should_fail and p.returncode != 0):
res = 'OK'
else:
res = 'FAIL'
returncode = p.returncode
return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
def print_stats(numlen, tests, name, result, i, logfile, jsonlogfile):
global collected_logs, error_count, options
startpad = ' '*(numlen - len('%d' % (i+1)))
num = '%s%d/%d' % (startpad, i+1, len(tests))
padding1 = ' '*(38-len(name))
padding2 = ' '*(8-len(result.res))
result_str = '%s %s %s%s%s%5.2f s' % \
(num, name, padding1, result.res, padding2, result.duration)
print(result_str)
result_str += "\n\n" + result.get_log()
if (result.returncode != 0) != result.should_fail:
error_count += 1
if options.print_errorlogs:
collected_logs.append(result_str)
logfile.write(result_str)
write_json_log(jsonlogfile, name, result)
def drain_futures(futures):
for i in futures:
(result, numlen, tests, name, i, logfile, jsonlogfile) = i
print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
def filter_tests(suite, tests):
if suite is None:
return tests
return [x for x in tests if suite in x.suite]
def run_tests(datafilename):
global options
logfile_base = 'meson-logs/testlog'
if options.wrapper is None:
wrap = []
logfilename = logfile_base + '.txt'
jsonlogfilename = logfile_base+ '.json'
else:
wrap = [options.wrapper]
logfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.txt'
jsonlogfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.json'
with open(datafilename, 'rb') as f:
tests = pickle.load(f)
if len(tests) == 0:
print('No tests defined.')
return
numlen = len('%d' % len(tests))
varname = 'MESON_TESTTHREADS'
if varname in os.environ:
try:
num_workers = int(os.environ[varname])
except ValueError:
print('Invalid value in %s, using 1 thread.' % varname)
num_workers = 1
else:
num_workers = multiprocessing.cpu_count()
executor = conc.ThreadPoolExecutor(max_workers=num_workers)
futures = []
filtered_tests = filter_tests(options.suite, tests)
with open(jsonlogfilename, 'w') as jsonlogfile, \
open(logfilename, 'w') as logfile:
logfile.write('Log of Meson test suite run on %s.\n\n' %
datetime.datetime.now().isoformat())
for i, test in enumerate(filtered_tests):
if test.suite[0] == '':
visible_name = test.name
else:
if options.suite is not None:
visible_name = options.suite + ' / ' + test.name
else:
visible_name = test.suite[0] + ' / ' + test.name
if not test.is_parallel:
drain_futures(futures)
futures = []
res = run_single_test(wrap, test)
print_stats(numlen, filtered_tests, visible_name, res, i,
logfile, jsonlogfile)
else:
f = executor.submit(run_single_test, wrap, test)
futures.append((f, numlen, filtered_tests, visible_name, i,
logfile, jsonlogfile))
drain_futures(futures)
return logfilename
def run(args):
global collected_logs, error_count, options
collected_logs = [] # To avoid state leaks when invoked multiple times (running tests in-process)
error_count = 0
options = parser.parse_args(args)
if len(options.args) != 1:
print('Test runner for Meson. Do not run on your own, mmm\'kay?')
print('%s [data file]' % sys.argv[0])
if options.wd is not None:
os.chdir(options.wd)
datafile = options.args[0]
logfilename = run_tests(datafile)
if len(collected_logs) > 0:
if len(collected_logs) > 10:
print('\nThe output from 10 first failed tests:\n')
else:
print('\nThe output from the failed tests:\n')
for log in collected_logs[:10]:
lines = log.splitlines()
if len(lines) > 100:
print(lines[0])
print('--- Listing only the last 100 lines from a long log. ---')
lines = lines[-99:]
for line in lines:
print(line)
if logfilename:
print('Full log written to %s.' % logfilename)
return error_count
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))

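The deleted runner kept collected_logs, error_count and options in module globals and reset them on every run() call; its replacement below moves that state into a TestHarness instance and refuses reuse. A minimal lifecycle sketch (run from a configured build directory):

    import mesontest

    options = mesontest.parser.parse_args([])
    th = mesontest.TestHarness(options)
    rc = th.doit()  # a second th.doit() raises RuntimeError
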
mesontest.py (new file):
@@ -0,0 +1,375 @@
#!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A tool to run tests in many different ways.
import subprocess, sys, os, argparse
import pickle
import mesonbuild
from mesonbuild import build
from mesonbuild import environment
import time, datetime, multiprocessing, json
import concurrent.futures as conc
import platform
import signal
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def determine_worker_count():
varname = 'MESON_TESTTHREADS'
if varname in os.environ:
try:
num_workers = int(os.environ[varname])
except ValueError:
print('Invalid value in %s, using 1 thread.' % varname)
num_workers = 1
else:
try:
# Fails in some weird environments such as Debian
# reproducible build.
num_workers = multiprocessing.cpu_count()
except Exception:
num_workers = 1
return num_workers
parser = argparse.ArgumentParser()
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
help='Number of times to run the tests.')
parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
help='Run test under gdb.')
parser.add_argument('--list', default=False, dest='list', action='store_true',
help='List available tests.')
parser.add_argument('--wrapper', default=None, dest='wrapper',
help='wrapper to run tests with (e.g. Valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
help='Only run tests belonging to the given suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
help='Do not split stderr and stdout in test logs.')
parser.add_argument('--print-errorlogs', default=False, action='store_true',
help="Whether to print faling tests' logs.")
parser.add_argument('--benchmark', default=False, action='store_true',
help="Run benchmarks instead of tests.")
parser.add_argument('--logbase', default='testlog',
help="Base name for log file.")
parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
help='How many parallel processes to use.')
parser.add_argument('args', nargs='*')
class TestRun():
def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
env):
self.res = res
self.returncode = returncode
self.duration = duration
self.stdo = stdo
self.stde = stde
self.cmd = cmd
self.env = env
self.should_fail = should_fail
def get_log(self):
res = '--- command ---\n'
if self.cmd is None:
res += 'NONE\n'
else:
res += "\n%s %s\n" %(' '.join(
["%s='%s'" % (k, v) for k, v in self.env.items()]),
' ' .join(self.cmd))
if self.stdo:
res += '--- stdout ---\n'
res += self.stdo
if self.stde:
if res[-1:] != '\n':
res += '\n'
res += '--- stderr ---\n'
res += self.stde
if res[-1:] != '\n':
res += '\n'
res += '-------\n\n'
return res
def decode(stream):
try:
return stream.decode('utf-8')
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')
def write_json_log(jsonlogfile, test_name, result):
jresult = {'name' : test_name,
'stdout' : result.stdo,
'result' : result.res,
'duration' : result.duration,
'returncode' : result.returncode,
'command' : result.cmd,
'env' : result.env}
if result.stde:
jresult['stderr'] = result.stde
jsonlogfile.write(json.dumps(jresult) + '\n')
def run_with_mono(fname):
if fname.endswith('.exe') and not is_windows():
return True
return False
class TestHarness:
def __init__(self, options):
self.options = options
self.collected_logs = []
self.error_count = 0
self.is_run = False
if self.options.benchmark:
self.datafile = 'meson-private/meson_benchmark_setup.dat'
else:
self.datafile = 'meson-private/meson_test_setup.dat'
def run_single_test(self, wrap, test):
if test.fname[0].endswith('.jar'):
cmd = ['java', '-jar'] + test.fname
elif not test.is_cross and run_with_mono(test.fname[0]):
cmd = ['mono'] + test.fname
else:
if test.is_cross:
if test.exe_runner is None:
# Can not run test on cross compiled executable
# because there is no execute wrapper.
cmd = None
else:
cmd = [test.exe_runner] + test.fname
else:
cmd = test.fname
if cmd is None:
res = 'SKIP'
duration = 0.0
stdo = 'Not run because can not execute cross compiled binaries.'
stde = None
returncode = -1
else:
cmd = wrap + cmd + test.cmd_args
starttime = time.time()
child_env = os.environ.copy()
if isinstance(test.env, build.EnvironmentVariables):
test.env = test.env.get_env(child_env)
child_env.update(test.env)
if len(test.extra_paths) > 0:
child_env['PATH'] = (child_env['PATH'] + os.pathsep.join([''] + test.extra_paths))
if is_windows():
setsid = None
else:
setsid = os.setsid
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT,
env=child_env,
cwd=test.workdir,
preexec_fn=setsid)
timed_out = False
try:
(stdo, stde) = p.communicate(timeout=test.timeout)
except subprocess.TimeoutExpired:
timed_out = True
# Python does not provide multiplatform support for
# killing a process and all its children so we need
# to roll our own.
if is_windows():
subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
else:
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
(stdo, stde) = p.communicate()
endtime = time.time()
duration = endtime - starttime
stdo = decode(stdo)
if stde:
stde = decode(stde)
if timed_out:
res = 'TIMEOUT'
elif (not test.should_fail and p.returncode == 0) or \
(test.should_fail and p.returncode != 0):
res = 'OK'
else:
res = 'FAIL'
returncode = p.returncode
return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
startpad = ' '*(numlen - len('%d' % (i+1)))
num = '%s%d/%d' % (startpad, i+1, len(tests))
padding1 = ' '*(38-len(name))
padding2 = ' '*(8-len(result.res))
result_str = '%s %s %s%s%s%5.2f s' % \
(num, name, padding1, result.res, padding2, result.duration)
print(result_str)
result_str += "\n\n" + result.get_log()
if (result.returncode != 0) != result.should_fail:
self.error_count += 1
if self.options.print_errorlogs:
self.collected_logs.append(result_str)
logfile.write(result_str)
write_json_log(jsonlogfile, name, result)
def doit(self):
if self.is_run:
raise RuntimeError('Test harness object can only be used once.')
if not os.path.isfile(self.datafile):
print('Test data file not found. Probably this means that you did not run this in the build directory.')
return 1
self.is_run = True
logfilename = self.run_tests(self.datafile, self.options.logbase)
if len(self.collected_logs) > 0:
if len(self.collected_logs) > 10:
print('\nThe output from 10 first failed tests:\n')
else:
print('\nThe output from the failed tests:\n')
for log in self.collected_logs[:10]:
lines = log.splitlines()
if len(lines) > 100:
print(lines[0])
print('--- Listing only the last 100 lines from a long log. ---')
lines = lines[-99:]
for line in lines:
print(line)
print('Full log written to %s.' % logfilename)
return self.error_count
def run_tests(self, datafilename, log_base):
logfile_base = os.path.join('meson-logs', log_base)
if self.options.wrapper is None:
wrap = []
logfilename = logfile_base + '.txt'
jsonlogfilename = logfile_base+ '.json'
else:
wrap = self.options.wrapper.split()
namebase = wrap[0]
logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
with open(datafilename, 'rb') as f:
tests = pickle.load(f)
if len(tests) == 0:
print('No tests defined.')
return
numlen = len('%d' % len(tests))
executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
futures = []
filtered_tests = filter_tests(self.options.suite, tests)
with open(jsonlogfilename, 'w') as jsonlogfile, \
open(logfilename, 'w') as logfile:
logfile.write('Log of Meson test suite run on %s.\n\n' %
datetime.datetime.now().isoformat())
for i, test in enumerate(filtered_tests):
if test.suite[0] == '':
visible_name = test.name
else:
if self.options.suite is not None:
visible_name = self.options.suite + ' / ' + test.name
else:
visible_name = test.suite[0] + ' / ' + test.name
if not test.is_parallel:
self.drain_futures(futures)
futures = []
res = self.run_single_test(wrap, test)
self.print_stats(numlen, filtered_tests, visible_name, res, i,
logfile, jsonlogfile)
else:
f = executor.submit(self.run_single_test, wrap, test)
futures.append((f, numlen, filtered_tests, visible_name, i,
logfile, jsonlogfile))
self.drain_futures(futures)
return logfilename
def drain_futures(self, futures):
for i in futures:
(result, numlen, tests, name, i, logfile, jsonlogfile) = i
self.print_stats(numlen, tests, name, result.result(), i, logfile, jsonlogfile)
def run_special(self):
'Tests run by the user, usually something like "under gdb 1000 times".'
if self.is_run:
raise RuntimeError('Can not use run_special after a full run.')
if self.options.wrapper is not None:
wrap = self.options.wrapper.split(' ')
else:
wrap = []
if self.options.gdb and len(wrap) > 0:
print('Can not specify both a wrapper and gdb.')
return 1
if os.path.isfile('build.ninja'):
subprocess.check_call([environment.detect_ninja(), 'all'])
tests = pickle.load(open(self.datafile, 'rb'))
if self.options.list:
for i in tests:
print(i.name)
return 0
for t in tests:
if t.name in self.options.args:
for i in range(self.options.repeat):
print('Running: %s %d/%d' % (t.name, i+1, self.options.repeat))
if self.options.gdb:
gdbrun(t)
else:
res = self.run_single_test(wrap, t)
if (res.returncode == 0 and res.should_fail) or \
(res.returncode != 0 and not res.should_fail):
print('Test failed:\n\n-- stdout --\n')
print(res.stdo)
print('\n-- stderr --\n')
print(res.stde)
return 1
return 0
def filter_tests(suite, tests):
if suite is None:
return tests
return [x for x in tests if suite in x.suite]
def gdbrun(test):
child_env = os.environ.copy()
child_env.update(test.env)
# On success will exit cleanly. On failure gdb will ask user
# if they really want to exit.
exe = test.fname
args = test.cmd_args
if len(args) > 0:
argset = ['-ex', 'set args ' + ' '.join(args)]
else:
argset = []
cmd = ['gdb', '--quiet'] + argset + ['-ex', 'run', '-ex', 'quit'] + exe
# FIXME a ton of stuff. run_single_test grabs stdout & co,
# which we do not want to do when running under gdb.
p = subprocess.Popen(cmd,
env=child_env,
cwd=test.workdir,
)
p.communicate()
def run(args):
options = parser.parse_args(args)
if options.benchmark:
options.num_processes = 1
th = TestHarness(options)
if len(options.args) == 0:
return th.doit()
return th.run_special()
if __name__ == '__main__':
sys.exit(run(sys.argv[1:]))

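Besides backing the ninja targets, the new tool is meant for manual use: given test names as positional arguments, run() switches to run_special(), which rebuilds with ninja and then repeats the named tests, optionally under gdb or an arbitrary wrapper. Sketched invocations (the test name 'mytest' is hypothetical):

    import mesontest

    mesontest.run([])                                     # full suite, like 'ninja test'
    mesontest.run(['--repeat', '10', '--gdb', 'mytest'])  # rerun one test under gdb
    mesontest.run(['--wrapper', 'valgrind', 'mytest'])    # run it under valgrind
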
run_project_tests.py:
@@ -19,12 +19,12 @@ import os, subprocess, shutil, sys, signal
from io import StringIO
from ast import literal_eval
import sys, tempfile
import mesontest
from mesonbuild import environment
from mesonbuild import mesonlib
from mesonbuild import mlog
from mesonbuild import mesonmain
from mesonbuild.mesonlib import stringlistify
from mesonbuild.scripts import meson_test, meson_benchmark
import argparse
import xml.etree.ElementTree as ET
import time
@@ -211,8 +211,8 @@ def run_test_inprocess(testdir):
old_cwd = os.getcwd()
os.chdir(testdir)
try:
returncode_test = meson_test.run(['meson-private/meson_test_setup.dat'])
returncode_benchmark = meson_benchmark.run(['meson-private/meson_benchmark_setup.dat'])
returncode_test = mesontest.run([])
returncode_benchmark = mesontest.run(['--benchmark', '--logbase', 'benchmarklog'])
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
