Merge pull request #317 from mesonbuild/benchmark

Create benchmark feature
Jussi Pakkanen 9 years ago
commit f7608fc569
Files changed (11), with changed line counts:

  1. backends.py (12)
  2. build.py (4)
  3. coredata.py (1)
  4. interpreter.py (11)
  5. meson_benchmark.py (97)
  6. mesonintrospect.py (12)
  7. ninjabackend.py (11)
  8. run_tests.py (9)
  9. test cases/common/99 benchmark/delayer.c (20)
  10. test cases/common/99 benchmark/meson.build (5)
  11. test cases/frameworks/1 boost/nomod.cpp (2)
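
In short: this commit adds a benchmark() function to the Meson DSL (same two-argument form as test()), serialises the declared benchmarks alongside the unit tests, adds a --benchmarks flag to mesonintrospect.py, and makes the Ninja backend emit a 'benchmark' target whose runner executes each benchmark five times and logs mean duration plus standard deviation to meson-logs/benchmarklog.json. A minimal usage sketch, modelled on the '99 benchmark' test case added below (the project, target and file names here are placeholders, not part of the commit):

  project('mybench', 'c')
  slowprog = executable('slowprog', 'slowprog.c')
  benchmark('slowprog timing', slowprog)

After configuring with the Ninja backend, running 'ninja benchmark' in the build directory should execute the declared benchmarks.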

backends.py

@@ -127,6 +127,10 @@ class Backend():
         datafile = open(test_data, 'wb')
         self.write_test_file(datafile)
         datafile.close()
+        benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+        datafile = open(benchmark_data, 'wb')
+        self.write_benchmark_file(datafile)
+        datafile.close()

     def has_vala(self, target):
         for s in target.get_sources():

@@ -269,9 +273,15 @@ class Backend():
             result.append(dirseg)
         return result

+    def write_benchmark_file(self, datafile):
+        self.write_test_serialisation(self.build.get_benchmarks(), datafile)
+
     def write_test_file(self, datafile):
+        self.write_test_serialisation(self.build.get_tests(), datafile)
+
+    def write_test_serialisation(self, tests, datafile):
         arr = []
-        for t in self.build.get_tests():
+        for t in tests:
             exe = t.get_exe()
             if isinstance(exe, dependencies.ExternalProgram):
                 fname = exe.fullpath

build.py

@@ -65,6 +65,7 @@ class Build:
         self.cross_compilers = []
         self.global_args = {}
         self.tests = []
+        self.benchmarks = []
         self.headers = []
         self.man = []
         self.data = []

@@ -108,6 +109,9 @@ class Build:
     def get_tests(self):
         return self.tests

+    def get_benchmarks(self):
+        return self.benchmarks
+
     def get_headers(self):
         return self.headers

coredata.py

@@ -215,6 +215,7 @@ forbidden_target_names = {'clean': None,
                           'all': None,
                           'test': None,
                           'test-valgrind': None,
+                          'benchmark': None,
                           'install': None,
                           'build.ninja': None,
                           }

interpreter.py

@@ -943,6 +943,7 @@ class Interpreter():
                       'run_target' : self.func_run_target,
                       'generator' : self.func_generator,
                       'test' : self.func_test,
+                      'benchmark' : self.func_benchmark,
                       'install_headers' : self.func_install_headers,
                       'install_man' : self.func_install_man,
                       'subdir' : self.func_subdir,

@@ -1677,7 +1678,13 @@ class Interpreter():
         self.generators.append(gen)
         return gen

+    def func_benchmark(self, node, args, kwargs):
+        self.add_test(node, args, kwargs, False)
+
     def func_test(self, node, args, kwargs):
+        self.add_test(node, args, kwargs, True)
+
+    def add_test(self, node, args, kwargs, is_base_test):
         if len(args) != 2:
             raise InterpreterException('Incorrect number of arguments')
         if not isinstance(args[0], str):

@@ -1719,8 +1726,12 @@ class Interpreter():
         if not isinstance(timeout, int):
             raise InterpreterException('Timeout must be an integer.')
         t = Test(args[0], args[1].held_object, par, cmd_args, env, should_fail, valgrind_args, timeout)
-        self.build.tests.append(t)
-        mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+        if is_base_test:
+            self.build.tests.append(t)
+            mlog.debug('Adding test "', mlog.bold(args[0]), '".', sep='')
+        else:
+            self.build.benchmarks.append(t)
+            mlog.debug('Adding benchmark "', mlog.bold(args[0]), '".', sep='')

     @stringArgs
     def func_install_headers(self, node, args, kwargs):

meson_benchmark.py (new file)

@@ -0,0 +1,97 @@
#!/usr/bin/env python3

# Copyright 2015 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess, sys, os, argparse
import pickle, statistics, json
import meson_test

parser = argparse.ArgumentParser()
parser.add_argument('--wd', default=None, dest='wd',
                    help='directory to cd into before running')
parser.add_argument('args', nargs='+')

def print_stats(numlen, num_tests, name, res, i, duration, stdev):
    startpad = ' '*(numlen - len('%d' % (i+1)))
    num = '%s%d/%d' % (startpad, i+1, num_tests)
    padding1 = ' '*(38-len(name))
    padding2 = ' '*(8-len(res))
    result_str = '%s %s %s%s%s%5.5f s +- %5.5f s' % \
        (num, name, padding1, res, padding2, duration, stdev)
    print(result_str)
#    write_json_log(jsonlogfile, name, result)

def print_json_log(jsonlogfile, rawruns, test_name, i):
    jsonobj = {'name' : test_name}
    runs = []
    for r in rawruns:
        runobj = {'duration': r.duration,
                  'stdout': r.stdo,
                  'stderr': r.stde,
                  'returncode' : r.returncode,
                  'duration' : r.duration}
        runs.append(runobj)
    jsonobj['runs'] = runs
    jsonlogfile.write(json.dumps(jsonobj) + '\n')
    jsonlogfile.flush()

def run_benchmarks(options, datafile):
    failed_tests = 0
    logfile_base = 'meson-logs/benchmarklog'
    jsonlogfilename = logfile_base + '.json'
    jsonlogfile = open(jsonlogfilename, 'w')
    tests = pickle.load(open(datafile, 'rb'))
    num_tests = len(tests)
    if num_tests == 0:
        print('No benchmarks defined.')
        return 0
    iteration_count = 5
    wrap = [] # Benchmarks on cross builds are pointless so don't support them.
    for i, test in enumerate(tests):
        runs = []
        durations = []
        failed = False
        for _ in range(iteration_count):
            res = meson_test.run_single_test(wrap, test)
            runs.append(res)
            durations.append(res.duration)
            if res.returncode != 0:
                failed = True
        mean = statistics.mean(durations)
        stddev = statistics.stdev(durations)
        if failed:
            resultstr = 'FAIL'
            failed_tests += 1
        else:
            resultstr = 'OK'
        print_stats(3, num_tests, test.name, resultstr, i, mean, stddev)
        print_json_log(jsonlogfile, runs, test.name, i)
    print('\nFull log written to meson-logs/benchmarklog.json.')
    return failed_tests

def run(args):
    global failed_tests
    options = parser.parse_args(args)
    if len(options.args) != 1:
        print('Benchmark runner for Meson. Do not run on your own, mmm\'kay?')
        print('%s [data file]' % sys.argv[0])
    if options.wd is not None:
        os.chdir(options.wd)
    datafile = options.args[0]
    returncode = run_benchmarks(options, datafile)
    return returncode

if __name__ == '__main__':
    sys.exit(run(sys.argv[1:]))

mesonintrospect.py

@@ -37,6 +37,8 @@ parser.add_argument('--buildoptions', action='store_true', dest='buildoptions',
                     help='List all build options.')
 parser.add_argument('--tests', action='store_true', dest='tests', default=False,
                     help='List all unit tests.')
+parser.add_argument('--benchmarks', action='store_true', dest='benchmarks', default=False,
+                    help='List all benchmarks.')
 parser.add_argument('--dependencies', action='store_true', dest='dependencies', default=False,
                     help='list external dependencies.')
 parser.add_argument('args', nargs='+')

@@ -157,7 +159,11 @@ def list_tests(testdata):
     result = []
     for t in testdata:
         to = {}
-        to['cmd'] = [t.fname] + t.cmd_args
+        if isinstance(t.fname, str):
+            fname = [t.fname]
+        else:
+            fname = t.fname
+        to['cmd'] = fname + t.cmd_args
         to['env'] = t.env
         to['name'] = t.name
         result.append(to)

@@ -175,9 +181,11 @@ if __name__ == '__main__':
     corefile = os.path.join(bdir, 'meson-private/coredata.dat')
     buildfile = os.path.join(bdir, 'meson-private/build.dat')
     testfile = os.path.join(bdir, 'meson-private/meson_test_setup.dat')
+    benchmarkfile = os.path.join(bdir, 'meson-private/meson_benchmark_setup.dat')
     coredata = pickle.load(open(corefile, 'rb'))
     builddata = pickle.load(open(buildfile, 'rb'))
     testdata = pickle.load(open(testfile, 'rb'))
+    benchmarkdata = pickle.load(open(benchmarkfile, 'rb'))
     if options.list_targets:
         list_targets(coredata, builddata)
     elif options.target_files is not None:

@@ -188,6 +196,8 @@ if __name__ == '__main__':
         list_buildoptions(coredata, builddata)
     elif options.tests:
         list_tests(testdata)
+    elif options.benchmarks:
+        list_tests(benchmarkdata)
     elif options.dependencies:
         list_deps(coredata)
     else:

ninjabackend.py

@@ -550,6 +550,17 @@ class NinjaBackend(backends.Backend):
         velem.write(outfile)
         self.check_outputs(velem)

+        # And then benchmarks.
+        benchmark_script = os.path.join(script_root, 'meson_benchmark.py')
+        benchmark_data = os.path.join(self.environment.get_scratch_dir(), 'meson_benchmark_setup.dat')
+        cmd = [sys.executable, benchmark_script, benchmark_data]
+        elem = NinjaBuildElement('benchmark', 'CUSTOM_COMMAND', ['all', 'PHONY'])
+        elem.add_item('COMMAND', cmd)
+        elem.add_item('DESC', 'Running benchmark suite.')
+        elem.add_item('pool', 'console')
+        elem.write(outfile)
+        self.check_outputs(elem)
+
     def generate_rules(self, outfile):
         outfile.write('# Rules for compiling.\n\n')
         self.generate_compile_rules(outfile)

run_tests.py

@@ -21,7 +21,7 @@ import sys
 import environment
 import mesonlib
 import mlog
-import meson, meson_test
+import meson, meson_test, meson_benchmark
 import argparse
 import xml.etree.ElementTree as ET
 import time

@@ -87,7 +87,7 @@ def setup_commands(backend):
         compile_commands = [ninja_command, '-v']
     else:
         compile_commands = [ninja_command]
-    test_commands = [ninja_command, 'test']
+    test_commands = [ninja_command, 'test', 'benchmark']
     install_commands = [ninja_command, 'install']

 def platform_fix_filename(fname):

@@ -165,11 +165,12 @@ def run_test_inprocess(testdir):
     sys.stderr = mystderr = StringIO()
     old_cwd = os.getcwd()
     os.chdir(testdir)
-    returncode = meson_test.run(['meson-private/meson_test_setup.dat'])
+    returncode_test = meson_test.run(['meson-private/meson_test_setup.dat'])
+    returncode_benchmark = meson_benchmark.run(['meson-private/meson_benchmark_setup.dat'])
     sys.stdout = old_stdout
     sys.stderr = old_stderr
     os.chdir(old_cwd)
-    return (returncode, mystdout.getvalue(), mystderr.getvalue())
+    return (max(returncode_test, returncode_benchmark), mystdout.getvalue(), mystderr.getvalue())

 def run_test(testdir, extra_args, should_succeed):

test cases/common/99 benchmark/delayer.c (new file)

@@ -0,0 +1,20 @@
/* Simple prog that sleeps for a random time. */

#include<stdlib.h>
#include<time.h>
#if defined(_WIN32)
#include<windows.h>
#endif

int main(int argc, char **argv) {
    srand(time(NULL));
#if !defined(_WIN32)
    struct timespec t;
    t.tv_sec = 0;
    t.tv_nsec = 199999999.0*rand()/RAND_MAX;
    nanosleep(&t, NULL);
#else
    Sleep(50.0*rand()/RAND_MAX);
#endif
    return 0;
}

test cases/common/99 benchmark/meson.build (new file)

@@ -0,0 +1,5 @@
project('benchmark', 'c',
  default_options : ['c_std=gnu99'])

delayer = executable('delayer', 'delayer.c')
benchmark('delayer', delayer)

test cases/frameworks/1 boost/nomod.cpp

@@ -9,7 +9,7 @@ boost::any get_any() {
 int main(int argc, char **argv) {
     boost::any result = get_any();
     if(boost::any_cast<int>(result) == 3) {
-        std::cout << "Everything is fine in the worls.\n";
+        std::cout << "Everything is fine in the world.\n";
         return 0;
     } else {
         std::cout << "Mathematics stopped working.\n";
