commit
aa9668a2fc
10 changed files with 391 additions and 458 deletions
@ -1,99 +0,0 @@ |
||||
#!/usr/bin/env python3 |
||||
|
||||
# Copyright 2015 The Meson development team |
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
import subprocess, sys, os, argparse |
||||
import pickle, statistics, json |
||||
from . import meson_test |
||||
|
||||
# Command-line interface for the standalone benchmark runner.
parser = argparse.ArgumentParser()
# Optional directory to switch to before opening the pickled data file.
parser.add_argument('--wd', default=None, dest='wd',
                    help='directory to cd into before running')
# Positional arguments; run() expects exactly one (the benchmark data file).
parser.add_argument('args', nargs='+')
||||
|
||||
def print_stats(numlen, num_tests, name, res, i, duration, stdev):
    """Print one aligned console line for a finished benchmark.

    numlen is the character width reserved for the test counter; duration
    and stdev are the mean runtime and its standard deviation in seconds.
    """
    index = i + 1
    lead = ' ' * (numlen - len(str(index)))
    counter = '%s%d/%d' % (lead, index, num_tests)
    name_pad = ' ' * (38 - len(name))
    res_pad = ' ' * (8 - len(res))
    line = '%s %s %s%s%s%5.5f s +- %5.5f s' % (counter, name, name_pad,
                                               res, res_pad, duration, stdev)
    print(line)
||||
|
||||
def print_json_log(jsonlogfile, rawruns, test_name, i):
    """Append one JSON-lines record describing all runs of a benchmark.

    :param jsonlogfile: writable text file; one JSON object is appended per call
    :param rawruns: iterable of TestRun-like objects (duration, stdo, stde, returncode)
    :param test_name: benchmark name stored under the 'name' key
    :param i: benchmark index (unused; kept for interface compatibility)
    """
    jsonobj = {'name': test_name}
    runs = []
    for r in rawruns:
        # BUG FIX: the original dict literal listed the 'duration' key twice;
        # the duplicate has been removed (behavior is otherwise identical).
        runobj = {'duration': r.duration,
                  'stdout': r.stdo,
                  'returncode': r.returncode}
        if r.stde:
            runobj['stderr'] = r.stde
        runs.append(runobj)
    jsonobj['runs'] = runs
    jsonlogfile.write(json.dumps(jsonobj) + '\n')
    # Flush so a crash mid-run still leaves the completed records on disk.
    jsonlogfile.flush()
||||
|
||||
def run_benchmarks(options, datafile):
    """Run each pickled benchmark five times, log results, return failure count."""
    jsonlogfilename = 'meson-logs/benchmarklog' + '.json'
    with open(datafile, 'rb') as f:
        tests = pickle.load(f)
    num_tests = len(tests)
    if num_tests == 0:
        print('No benchmarks defined.')
        return 0
    iteration_count = 5
    wrap = []  # Benchmarks on cross builds are pointless so don't support them.
    failed_tests = 0
    with open(jsonlogfilename, 'w') as jsonlogfile:
        for i, test in enumerate(tests):
            runs = []
            failed = False
            for _ in range(iteration_count):
                res = meson_test.run_single_test(wrap, test)
                runs.append(res)
                if res.returncode != 0:
                    failed = True
            durations = [r.duration for r in runs]
            mean = statistics.mean(durations)
            stddev = statistics.stdev(durations)
            if failed:
                failed_tests += 1
            print_stats(3, num_tests, test.name, 'FAIL' if failed else 'OK',
                        i, mean, stddev)
            print_json_log(jsonlogfile, runs, test.name, i)
    print('\nFull log written to meson-logs/benchmarklog.json.')
    return failed_tests
||||
|
||||
def run(args):
    """Entry point: parse arguments, cd into --wd if given, run the benchmarks.

    Returns the number of failed benchmarks, or 1 on a usage error.
    """
    # BUG FIX: removed a bogus `global failed_tests` declaration (no such
    # module-level name exists) and made a wrong argument count abort instead
    # of falling through and running with an arbitrary argument.
    options = parser.parse_args(args)
    if len(options.args) != 1:
        print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
        print('%s [data file]' % sys.argv[0])
        return 1
    if options.wd is not None:
        os.chdir(options.wd)
    datafile = options.args[0]
    return run_benchmarks(options, datafile)
||||
|
||||
if __name__ == '__main__':
    # Standalone invocation: exit code is the number of failed benchmarks.
    sys.exit(run(sys.argv[1:]))
@ -1,290 +0,0 @@ |
||||
#!/usr/bin/env python3 |
||||
|
||||
# Copyright 2013-2016 The Meson development team |
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
import mesonbuild |
||||
from .. import build |
||||
import sys, os, subprocess, time, datetime, pickle, multiprocessing, json |
||||
import concurrent.futures as conc |
||||
import argparse |
||||
import platform |
||||
import signal |
||||
|
||||
def is_windows():
    """Return True when running on native Windows or under a MinGW environment."""
    system = platform.system().lower()
    return 'mingw' in system or system == 'windows'
||||
|
||||
# Module-level state shared across this run; reset by run() so the module
# can be invoked repeatedly in-process.
collected_logs = []   # full logs of unexpectedly failed tests
error_count = 0       # number of tests whose outcome contradicted should_fail
options = None        # parsed command-line namespace, set by run()


# Command-line interface for the test runner.
parser = argparse.ArgumentParser()
parser.add_argument('--wrapper', default=None, dest='wrapper',
                    help='wrapper to run tests with (e.g. valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
                    help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
                    help='Only run tests belonging to this suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                    help='Do not split stderr and stdout in test logs.')
# BUG FIX: corrected the "faling" typo in the user-visible help text.
parser.add_argument('--print-errorlogs', default=False, action='store_true',
                    help="Whether to print failing tests' logs.")
parser.add_argument('args', nargs='+')
||||
|
||||
|
||||
class TestRun():
    """Outcome of one executed test: status, timing, captured output, context."""

    def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
                 env):
        # res is the short status string ('OK', 'FAIL', 'SKIP', 'TIMEOUT').
        self.res = res
        self.returncode = returncode
        self.duration = duration
        self.stdo = stdo
        self.stde = stde
        self.cmd = cmd
        self.env = env
        self.should_fail = should_fail

    def get_log(self):
        """Render a human-readable log section for this run."""
        log = '--- command ---\n'
        if self.cmd is None:
            log += 'NONE\n'
        else:
            env_str = ' '.join("%s='%s'" % (k, v) for k, v in self.env.items())
            log += "\n%s %s\n" % (env_str, ' '.join(self.cmd))
        if self.stdo:
            log += '--- stdout ---\n'
            log += self.stdo
        if self.stde:
            if not log.endswith('\n'):
                log += '\n'
            log += '--- stderr ---\n'
            log += self.stde
        if not log.endswith('\n'):
            log += '\n'
        log += '-------\n\n'
        return log
||||
|
||||
def decode(stream):
    """Best-effort decode of captured output bytes to text."""
    try:
        text = stream.decode('utf-8')
    except UnicodeDecodeError:
        # Not valid UTF-8: fall back to Latin-1, dropping undecodable bytes.
        text = stream.decode('iso-8859-1', errors='ignore')
    return text
||||
|
||||
def write_json_log(jsonlogfile, test_name, result):
    """Append one JSON-lines record describing a finished test to jsonlogfile."""
    record = {
        'name': test_name,
        'stdout': result.stdo,
        'result': result.res,
        'duration': result.duration,
        'returncode': result.returncode,
        'command': result.cmd,
        'env': result.env,
    }
    # stderr is only recorded when the test actually produced some.
    if result.stde:
        record['stderr'] = result.stde
    jsonlogfile.write(json.dumps(record) + '\n')
||||
|
||||
def run_with_mono(fname):
    """Return True when *fname* is a .exe that needs mono (i.e. not on Windows)."""
    # Idiom fix: the original if/return True/return False collapses to a
    # single boolean expression with identical behavior.
    return fname.endswith('.exe') and not is_windows()
||||
|
||||
def run_single_test(wrap, test):
    """Execute one test, enforcing its timeout, and return a TestRun.

    :param wrap: list of wrapper command words (e.g. a valgrind invocation),
                 prepended to the test command; may be empty.
    :param test: unpickled test description object (fname, env, timeout, ...).
    """
    global options
    # Choose the interpreter/launcher based on the binary type.
    if test.fname[0].endswith('.jar'):
        cmd = ['java', '-jar'] + test.fname
    elif not test.is_cross and run_with_mono(test.fname[0]):
        cmd = ['mono'] + test.fname
    else:
        if test.is_cross:
            if test.exe_runner is None:
                # Can not run test on cross compiled executable
                # because there is no execute wrapper.
                cmd = None
            else:
                cmd = [test.exe_runner] + test.fname
        else:
            cmd = test.fname
    if cmd is None:
        # Cross binary without a runner: record a synthetic SKIP result.
        res = 'SKIP'
        duration = 0.0
        stdo = 'Not run because can not execute cross compiled binaries.'
        stde = None
        returncode = -1
    else:
        # Valgrind gets the test's extra valgrind arguments spliced in.
        if len(wrap) > 0 and 'valgrind' in wrap[0]:
            cmd = wrap + test.valgrind_args + cmd + test.cmd_args
        else:
            cmd = wrap + cmd + test.cmd_args
        starttime = time.time()
        child_env = os.environ.copy()
        if isinstance(test.env, build.EnvironmentVariables):
            test.env = test.env.get_env(child_env)

        child_env.update(test.env)
        if len(test.extra_paths) > 0:
            # The leading '' makes join() produce a separator before the
            # first extra path so it concatenates cleanly onto PATH.
            child_env['PATH'] = (child_env['PATH'] +
                                 os.pathsep.join([''] + test.extra_paths))
        # Start the child in its own session on POSIX so the whole process
        # group can be killed on timeout; Windows uses taskkill instead.
        if is_windows():
            setsid = None
        else:
            setsid = os.setsid
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE if options and options.split else subprocess.STDOUT,
                             env=child_env,
                             cwd=test.workdir,
                             preexec_fn=setsid)
        timed_out = False
        try:
            (stdo, stde) = p.communicate(timeout=test.timeout)
        except subprocess.TimeoutExpired:
            timed_out = True
            # Python does not provide multiplatform support for
            # killing a process and all its children so we need
            # to roll our own.
            if is_windows():
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:
                os.killpg(os.getpgid(p.pid), signal.SIGKILL)
            (stdo, stde) = p.communicate()
        endtime = time.time()
        duration = endtime - starttime
        stdo = decode(stdo)
        if stde:
            stde = decode(stde)
        # A test passes when its exit status matches the should_fail flag.
        if timed_out:
            res = 'TIMEOUT'
        elif (not test.should_fail and p.returncode == 0) or \
             (test.should_fail and p.returncode != 0):
            res = 'OK'
        else:
            res = 'FAIL'
        returncode = p.returncode
    return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)
||||
|
||||
def print_stats(numlen, tests, name, result, i, logfile, jsonlogfile):
    """Report one finished test: console line, text log, JSON log.

    Also updates the module-global error_count and (when --print-errorlogs
    is set) collects the full log of unexpectedly failed tests.
    """
    global collected_logs, error_count, options
    startpad = ' '*(numlen - len('%d' % (i+1)))
    num = '%s%d/%d' % (startpad, i+1, len(tests))
    # Fixed-width columns: name padded to 38 chars, status to 8.
    padding1 = ' '*(38-len(name))
    padding2 = ' '*(8-len(result.res))
    result_str = '%s %s %s%s%s%5.2f s' % \
        (num, name, padding1, result.res, padding2, result.duration)
    print(result_str)
    result_str += "\n\n" + result.get_log()
    # The run "errored" when the pass/fail outcome contradicts should_fail.
    if (result.returncode != 0) != result.should_fail:
        error_count += 1
        if options.print_errorlogs:
            collected_logs.append(result_str)
    logfile.write(result_str)
    write_json_log(jsonlogfile, name, result)
||||
|
||||
def drain_futures(futures):
    """Wait for each queued test future and print its result in queue order.

    Each entry is (future, numlen, tests, name, index, logfile, jsonlogfile).
    """
    # Fix: the original reused the loop variable `i` both for the whole tuple
    # and for the unpacked index, shadowing itself mid-statement.
    for entry in futures:
        (future, numlen, tests, name, idx, logfile, jsonlogfile) = entry
        print_stats(numlen, tests, name, future.result(), idx, logfile, jsonlogfile)
||||
|
||||
def filter_tests(suite, tests):
    """Return the tests belonging to *suite*; all tests when suite is None."""
    if suite is None:
        return tests
    return [t for t in tests if suite in t.suite]
||||
|
||||
def run_tests(datafilename):
    """Run all tests pickled in *datafilename*, writing text and JSON logs.

    Returns the text log filename, or None when no tests are defined.
    Parallel tests are queued on a thread pool; serial tests flush the
    queue first so console/log ordering stays deterministic.
    """
    global options
    logfile_base = 'meson-logs/testlog'
    # Wrapper runs get their own log files so plain and wrapped runs coexist.
    if options.wrapper is None:
        wrap = []
        logfilename = logfile_base + '.txt'
        jsonlogfilename = logfile_base+ '.json'
    else:
        wrap = [options.wrapper]
        logfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.txt'
        jsonlogfilename = logfile_base + '-' + options.wrapper.replace(' ', '_') + '.json'
    with open(datafilename, 'rb') as f:
        tests = pickle.load(f)
    if len(tests) == 0:
        print('No tests defined.')
        return
    numlen = len('%d' % len(tests))
    # Worker count: honor MESON_TESTTHREADS, else one worker per CPU.
    varname = 'MESON_TESTTHREADS'
    if varname in os.environ:
        try:
            num_workers = int(os.environ[varname])
        except ValueError:
            print('Invalid value in %s, using 1 thread.' % varname)
            num_workers = 1
    else:
        num_workers = multiprocessing.cpu_count()
    executor = conc.ThreadPoolExecutor(max_workers=num_workers)
    futures = []
    filtered_tests = filter_tests(options.suite, tests)

    with open(jsonlogfilename, 'w') as jsonlogfile, \
            open(logfilename, 'w') as logfile:
        logfile.write('Log of Meson test suite run on %s.\n\n' %
                      datetime.datetime.now().isoformat())
        for i, test in enumerate(filtered_tests):
            # Prefix the test name with its suite where one exists.
            if test.suite[0] == '':
                visible_name = test.name
            else:
                if options.suite is not None:
                    visible_name = options.suite + ' / ' + test.name
                else:
                    visible_name = test.suite[0] + ' / ' + test.name

            if not test.is_parallel:
                # Serial test: drain everything queued before running it.
                drain_futures(futures)
                futures = []
                res = run_single_test(wrap, test)
                print_stats(numlen, filtered_tests, visible_name, res, i,
                            logfile, jsonlogfile)
            else:
                f = executor.submit(run_single_test, wrap, test)
                futures.append((f, numlen, filtered_tests, visible_name, i,
                                logfile, jsonlogfile))
        drain_futures(futures)
    return logfilename
||||
|
||||
def run(args):
    """Entry point: run the whole suite and return the error count.

    Resets the module-global accumulators so the function can be invoked
    several times in one process.
    """
    global collected_logs, error_count, options
    collected_logs = [] # To avoid state leaks when invoked multiple times (running tests in-process)
    error_count = 0
    options = parser.parse_args(args)
    if len(options.args) != 1:
        # NOTE(review): a wrong argument count only warns; execution then
        # continues with args[0] — confirm this fall-through is intended.
        print('Test runner for Meson. Do not run on your own, mmm\'kay?')
        print('%s [data file]' % sys.argv[0])
    if options.wd is not None:
        os.chdir(options.wd)
    datafile = options.args[0]
    logfilename = run_tests(datafile)
    # Echo the logs of failed tests (capped at 10 logs, 100 lines each).
    if len(collected_logs) > 0:
        if len(collected_logs) > 10:
            print('\nThe output from 10 first failed tests:\n')
        else:
            print('\nThe output from the failed tests:\n')
        for log in collected_logs[:10]:
            lines = log.splitlines()
            if len(lines) > 100:
                print(lines[0])
                print('--- Listing only the last 100 lines from a long log. ---')
                lines = lines[-99:]
            for line in lines:
                print(line)
    if logfilename:
        print('Full log written to %s.' % logfilename)
    return error_count
||||
|
||||
if __name__ == '__main__':
    # Standalone invocation: exit code is the number of unexpected results.
    sys.exit(run(sys.argv[1:]))
@ -0,0 +1,375 @@ |
||||
#!/usr/bin/env python3 |
||||
|
||||
# Copyright 2016 The Meson development team |
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
# A tool to run tests in many different ways. |
||||
|
||||
import subprocess, sys, os, argparse |
||||
import pickle |
||||
import mesonbuild |
||||
from mesonbuild import build |
||||
from mesonbuild import environment |
||||
|
||||
import time, datetime, pickle, multiprocessing, json |
||||
import concurrent.futures as conc |
||||
import platform |
||||
import signal |
||||
|
||||
def is_windows():
    """True on native Windows and on MinGW environments."""
    name = platform.system().lower()
    if name == 'windows':
        return True
    return 'mingw' in name
||||
|
||||
def determine_worker_count():
    """Return the number of parallel test processes to use.

    Honors the MESON_TESTTHREADS environment variable; otherwise uses the
    CPU count, falling back to 1 where that cannot be determined.
    """
    varname = 'MESON_TESTTHREADS'
    value = os.environ.get(varname)
    if value is not None:
        try:
            return int(value)
        except ValueError:
            print('Invalid value in %s, using 1 thread.' % varname)
            return 1
    try:
        # Fails in some weird environments such as Debian
        # reproducible build.
        return multiprocessing.cpu_count()
    except Exception:
        return 1
||||
|
||||
# Command-line interface for the combined test/benchmark runner.
parser = argparse.ArgumentParser()
parser.add_argument('--repeat', default=1, dest='repeat', type=int,
                    help='Number of times to run the tests.')
parser.add_argument('--gdb', default=False, dest='gdb', action='store_true',
                    help='Run test under gdb.')
parser.add_argument('--list', default=False, dest='list', action='store_true',
                    help='List available tests.')
parser.add_argument('--wrapper', default=None, dest='wrapper',
                    help='wrapper to run tests with (e.g. Valgrind)')
parser.add_argument('--wd', default=None, dest='wd',
                    help='directory to cd into before running')
parser.add_argument('--suite', default=None, dest='suite',
                    help='Only run tests belonging to the given suite.')
parser.add_argument('--no-stdsplit', default=True, dest='split', action='store_false',
                    help='Do not split stderr and stdout in test logs.')
# BUG FIX: corrected the "faling" typo in the user-visible help text.
parser.add_argument('--print-errorlogs', default=False, action='store_true',
                    help="Whether to print failing tests' logs.")
parser.add_argument('--benchmark', default=False, action='store_true',
                    help="Run benchmarks instead of tests.")
parser.add_argument('--logbase', default='testlog',
                    help="Base name for log file.")
parser.add_argument('--num-processes', default=determine_worker_count(), type=int,
                    help='How many parallel processes to use.')
parser.add_argument('args', nargs='*')
||||
|
||||
class TestRun():
    """Result of one executed test: status string, timing, output, context."""

    def __init__(self, res, returncode, should_fail, duration, stdo, stde, cmd,
                 env):
        # res is one of 'OK', 'FAIL', 'SKIP', 'TIMEOUT'.
        self.res = res
        self.returncode = returncode
        self.duration = duration
        self.stdo = stdo
        self.stde = stde
        self.cmd = cmd
        self.env = env
        self.should_fail = should_fail

    def get_log(self):
        """Format this run as a human-readable log section."""
        log = '--- command ---\n'
        if self.cmd is None:
            log += 'NONE\n'
        else:
            env_str = ' '.join("%s='%s'" % (k, v) for k, v in self.env.items())
            log += "\n%s %s\n" % (env_str, ' '.join(self.cmd))
        if self.stdo:
            log += '--- stdout ---\n'
            log += self.stdo
        if self.stde:
            if not log.endswith('\n'):
                log += '\n'
            log += '--- stderr ---\n'
            log += self.stde
        if not log.endswith('\n'):
            log += '\n'
        log += '-------\n\n'
        return log
||||
|
||||
def decode(stream):
    """Decode captured output bytes, tolerating non-UTF-8 data."""
    try:
        return stream.decode('utf-8')
    except UnicodeDecodeError:
        # Fall back to Latin-1 and silently drop anything undecodable.
        return stream.decode('iso-8859-1', errors='ignore')
||||
|
||||
def write_json_log(jsonlogfile, test_name, result):
    """Write a single JSON-lines record for a finished test."""
    record = {
        'name': test_name,
        'stdout': result.stdo,
        'result': result.res,
        'duration': result.duration,
        'returncode': result.returncode,
        'command': result.cmd,
        'env': result.env,
    }
    # Only include stderr when the test produced any.
    if result.stde:
        record['stderr'] = result.stde
    jsonlogfile.write(json.dumps(record) + '\n')
||||
|
||||
def run_with_mono(fname):
    """Return True when *fname* is a .exe that needs mono (i.e. not on Windows)."""
    # Idiom fix: the original if/return True/return False collapses to a
    # single boolean expression with identical behavior.
    return fname.endswith('.exe') and not is_windows()
||||
|
||||
class TestHarness:
    """Drives one full test or benchmark run from pickled test definitions."""

    def __init__(self, options):
        """:param options: argparse namespace produced by this module's parser."""
        self.options = options
        self.collected_logs = []   # full logs of unexpectedly failed tests
        self.error_count = 0       # tests whose outcome contradicted should_fail
        self.is_run = False        # guards against reusing a harness instance
        if self.options.benchmark:
            self.datafile = 'meson-private/meson_benchmark_setup.dat'
        else:
            self.datafile = 'meson-private/meson_test_setup.dat'

    def run_single_test(self, wrap, test):
        """Execute one test (optionally via wrapper), enforcing its timeout.

        Returns a TestRun describing the outcome.
        """
        # Choose the interpreter/launcher based on the binary type.
        if test.fname[0].endswith('.jar'):
            cmd = ['java', '-jar'] + test.fname
        elif not test.is_cross and run_with_mono(test.fname[0]):
            cmd = ['mono'] + test.fname
        else:
            if test.is_cross:
                if test.exe_runner is None:
                    # Can not run test on cross compiled executable
                    # because there is no execute wrapper.
                    cmd = None
                else:
                    cmd = [test.exe_runner] + test.fname
            else:
                cmd = test.fname
        if cmd is None:
            # Cross binary without a runner: record a synthetic SKIP result.
            res = 'SKIP'
            duration = 0.0
            stdo = 'Not run because can not execute cross compiled binaries.'
            stde = None
            returncode = -1
        else:
            cmd = wrap + cmd + test.cmd_args
            starttime = time.time()
            child_env = os.environ.copy()
            if isinstance(test.env, build.EnvironmentVariables):
                test.env = test.env.get_env(child_env)

            child_env.update(test.env)
            if len(test.extra_paths) > 0:
                # BUG FIX: use the platform path separator (os.pathsep) as the
                # previous runner did, instead of a hard-coded ';' which is
                # wrong on non-Windows systems. The leading '' makes join()
                # emit a separator before the first extra path.
                child_env['PATH'] = (child_env['PATH'] +
                                     os.pathsep.join([''] + test.extra_paths))
            # Start the child in its own session on POSIX so the whole
            # process group can be killed on timeout; Windows uses taskkill.
            if is_windows():
                setsid = None
            else:
                setsid = os.setsid
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE if self.options and self.options.split else subprocess.STDOUT,
                                 env=child_env,
                                 cwd=test.workdir,
                                 preexec_fn=setsid)
            timed_out = False
            try:
                (stdo, stde) = p.communicate(timeout=test.timeout)
            except subprocess.TimeoutExpired:
                timed_out = True
                # Python does not provide multiplatform support for
                # killing a process and all its children so we need
                # to roll our own.
                if is_windows():
                    subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
                else:
                    os.killpg(os.getpgid(p.pid), signal.SIGKILL)
                (stdo, stde) = p.communicate()
            endtime = time.time()
            duration = endtime - starttime
            stdo = decode(stdo)
            if stde:
                stde = decode(stde)
            # A test passes when its exit status matches the should_fail flag.
            if timed_out:
                res = 'TIMEOUT'
            elif (not test.should_fail and p.returncode == 0) or \
                 (test.should_fail and p.returncode != 0):
                res = 'OK'
            else:
                res = 'FAIL'
            returncode = p.returncode
        return TestRun(res, returncode, test.should_fail, duration, stdo, stde, cmd, test.env)

    def print_stats(self, numlen, tests, name, result, i, logfile, jsonlogfile):
        """Report one finished test on the console and in both log files."""
        startpad = ' '*(numlen - len('%d' % (i+1)))
        num = '%s%d/%d' % (startpad, i+1, len(tests))
        # Fixed-width columns: name padded to 38 chars, status to 8.
        padding1 = ' '*(38-len(name))
        padding2 = ' '*(8-len(result.res))
        result_str = '%s %s %s%s%s%5.2f s' % \
            (num, name, padding1, result.res, padding2, result.duration)
        print(result_str)
        result_str += "\n\n" + result.get_log()
        # The run "errored" when pass/fail contradicts should_fail.
        if (result.returncode != 0) != result.should_fail:
            self.error_count += 1
            if self.options.print_errorlogs:
                self.collected_logs.append(result_str)
        logfile.write(result_str)
        write_json_log(jsonlogfile, name, result)

    def doit(self):
        """Run the full suite once; return the number of unexpected results."""
        if self.is_run:
            raise RuntimeError('Test harness object can only be used once.')
        if not os.path.isfile(self.datafile):
            # BUG FIX: the original message was missing its verb
            # ("Test data file. Probably ...").
            print('Test data file not found. Probably this means that you did not run this in the build directory.')
            return 1
        self.is_run = True
        logfilename = self.run_tests(self.datafile, self.options.logbase)
        # Echo the logs of failed tests (capped at 10 logs, 100 lines each).
        if len(self.collected_logs) > 0:
            if len(self.collected_logs) > 10:
                print('\nThe output from 10 first failed tests:\n')
            else:
                print('\nThe output from the failed tests:\n')
            for log in self.collected_logs[:10]:
                lines = log.splitlines()
                if len(lines) > 100:
                    print(lines[0])
                    print('--- Listing only the last 100 lines from a long log. ---')
                    lines = lines[-99:]
                for line in lines:
                    print(line)
        print('Full log written to %s.' % logfilename)
        return self.error_count

    def run_tests(self, datafilename, log_base):
        """Execute all tests from *datafilename*, writing text and JSON logs.

        Returns the text log filename, or None when no tests are defined.
        """
        logfile_base = os.path.join('meson-logs', log_base)
        # Wrapper runs get their own log files so plain and wrapped runs coexist.
        if self.options.wrapper is None:
            wrap = []
            logfilename = logfile_base + '.txt'
            jsonlogfilename = logfile_base + '.json'
        else:
            wrap = self.options.wrapper.split()
            namebase = wrap[0]
            logfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.txt'
            jsonlogfilename = logfile_base + '-' + namebase.replace(' ', '_') + '.json'
        with open(datafilename, 'rb') as f:
            tests = pickle.load(f)
        if len(tests) == 0:
            print('No tests defined.')
            return
        numlen = len('%d' % len(tests))
        executor = conc.ThreadPoolExecutor(max_workers=self.options.num_processes)
        futures = []
        filtered_tests = filter_tests(self.options.suite, tests)

        with open(jsonlogfilename, 'w') as jsonlogfile, \
                open(logfilename, 'w') as logfile:
            logfile.write('Log of Meson test suite run on %s.\n\n' %
                          datetime.datetime.now().isoformat())
            for i, test in enumerate(filtered_tests):
                # Prefix the test name with its suite where one exists.
                if test.suite[0] == '':
                    visible_name = test.name
                else:
                    if self.options.suite is not None:
                        visible_name = self.options.suite + ' / ' + test.name
                    else:
                        visible_name = test.suite[0] + ' / ' + test.name

                if not test.is_parallel:
                    # Serial test: drain everything queued so ordering holds.
                    self.drain_futures(futures)
                    futures = []
                    res = self.run_single_test(wrap, test)
                    self.print_stats(numlen, filtered_tests, visible_name, res, i,
                                     logfile, jsonlogfile)
                else:
                    f = executor.submit(self.run_single_test, wrap, test)
                    futures.append((f, numlen, filtered_tests, visible_name, i,
                                    logfile, jsonlogfile))
            self.drain_futures(futures)
        return logfilename

    def drain_futures(self, futures):
        """Wait for each queued test future and report its result in order.

        Each entry is (future, numlen, tests, name, index, logfile, jsonlogfile).
        """
        # Fix: avoid the original's self-shadowing loop variable `i`.
        for entry in futures:
            (future, numlen, tests, name, idx, logfile, jsonlogfile) = entry
            self.print_stats(numlen, tests, name, future.result(), idx, logfile, jsonlogfile)

    def run_special(self):
        """Tests run by the user, usually something like "under gdb 1000 times"."""
        if self.is_run:
            raise RuntimeError('Can not use run_special after a full run.')
        if self.options.wrapper is not None:
            wrap = self.options.wrapper.split(' ')
        else:
            wrap = []
        if self.options.gdb and len(wrap) > 0:
            print('Can not specify both a wrapper and gdb.')
            return 1
        if os.path.isfile('build.ninja'):
            # Rebuild first so stale binaries are not tested.
            subprocess.check_call([environment.detect_ninja(), 'all'])
        # BUG FIX: close the data file deterministically; the original left
        # the handle from pickle.load(open(...)) to the garbage collector.
        with open(self.datafile, 'rb') as f:
            tests = pickle.load(f)
        if self.options.list:
            for i in tests:
                print(i.name)
            return 0
        for t in tests:
            if t.name in self.options.args:
                for i in range(self.options.repeat):
                    print('Running: %s %d/%d' % (t.name, i+1, self.options.repeat))
                    if self.options.gdb:
                        gdbrun(t)
                    else:
                        res = self.run_single_test(wrap, t)
                        if (res.returncode == 0 and res.should_fail) or \
                           (res.returncode != 0 and not res.should_fail):
                            print('Test failed:\n\n-- stdout --\n')
                            print(res.stdo)
                            print('\n-- stderr --\n')
                            print(res.stde)
                            return 1
        return 0
||||
|
||||
def filter_tests(suite, tests):
    """Limit *tests* to the given suite; None means no filtering."""
    if suite is None:
        return tests
    return [candidate for candidate in tests if suite in candidate.suite]
||||
|
||||
def gdbrun(test):
    """Run a single test interactively under gdb with its env and arguments."""
    child_env = os.environ.copy()
    child_env.update(test.env)
    # On success will exit cleanly. On failure gdb will ask user
    # if they really want to exit.
    exe = test.fname
    args = test.cmd_args
    if len(args) > 0:
        # Pass the test's own arguments through gdb's `set args`.
        argset = ['-ex', 'set args ' + ' '.join(args)]
    else:
        argset = []
    cmd = ['gdb', '--quiet'] + argset + ['-ex', 'run', '-ex', 'quit'] + exe
    # FIXME a ton of stuff. run_single_test grabs stdout & co,
    # which we do not want to do when running under gdb.
    p = subprocess.Popen(cmd,
                         env=child_env,
                         cwd=test.workdir,
                         )
    p.communicate()
||||
|
||||
def run(args):
    """Parse arguments and run either the full suite or user-selected tests."""
    options = parser.parse_args(args)
    if options.benchmark:
        # Benchmarks are timing-sensitive, so force serial execution.
        options.num_processes = 1
    harness = TestHarness(options)
    if not options.args:
        return harness.doit()
    return harness.run_special()
||||
|
||||
if __name__ == '__main__':
    # Standalone invocation: exit code is the number of unexpected results.
    sys.exit(run(sys.argv[1:]))
Loading…
Reference in new issue