diff --git a/builds/testing.mk b/builds/testing.mk
index 3da038804..c6f7d57fc 100644
--- a/builds/testing.mk
+++ b/builds/testing.mk
@@ -3,7 +3,7 @@ FTBENCH_DIR = $(TOP_DIR)/src/tools/ftbench
 FTBENCH_SRC = $(FTBENCH_DIR)/ftbench.c
 FTBENCH_OBJ = $(OBJ_DIR)/bench.$(SO)
 FTBENCH_BIN = $(OBJ_DIR)/bench$E
-FTBENCH_FLAG ?= -c 750 -w 50
+FTBENCH_FLAG ?= -c 1000 -w 100
 INCLUDES = $(TOP_DIR)/include
 FONTS = $(wildcard $(FTBENCH_DIR)/fonts/*.ttf)
 BASELINE_DIR = $(OBJ_DIR)/baseline/
@@ -12,7 +12,7 @@ BASELINE = $(addprefix $(BASELINE_DIR), $(notdir $(FONTS:.ttf=.txt)))
 BENCHMARK = $(addprefix $(BENCHMARK_DIR), $(notdir $(FONTS:.ttf=.txt)))
 BASELINE_INFO = $(BASELINE_DIR)info.txt
 BENCHMARK_INFO = $(BENCHMARK_DIR)info.txt
-HTMLCREATOR = $(FTBENCH_DIR)/src/tohtml.py
+HTMLCREATOR = $(OBJ_DIR)/tohtml.py
 HTMLFILE = $(OBJ_DIR)/benchmark.html
 
 FT_INCLUDES := $(OBJ_BUILD) \
diff --git a/src/tools/ftbench/ftbench.c b/src/tools/ftbench/ftbench.c
index 697c62222..0d5078b45 100644
--- a/src/tools/ftbench/ftbench.c
+++ b/src/tools/ftbench/ftbench.c
@@ -1046,6 +1046,10 @@ static void benchmark(FT_Face face, btest_t* test, int max_iter, double max_time
   {
     FT_Face   face;
     FT_Error  error;
+    btimer_t  total;
+
+    TIMER_RESET(&total);
+    TIMER_START(&total);
 
     unsigned long  max_bytes   = CACHE_SIZE * 1024;
     char*          test_string = NULL;
@@ -1390,7 +1394,7 @@ static void benchmark(FT_Face face, btest_t* test, int max_iter, double max_time
     if ( max_iter )
       printf( "number of iterations for each test: at most %d\n",
               max_iter );
-    printf( "number of iteration as warmup in all tests: %d\n",
+    printf( "number of iterations as warmup in all tests: %d\n",
             warmup_iter );
 
     printf( "\n"
@@ -1562,6 +1566,9 @@ static void benchmark(FT_Face face, btest_t* test, int max_iter, double max_time
         break;
       }
     }
+    TIMER_STOP(&total);
+    double total_time = TIMER_GET(&total);
+    printf("\nTotal time: %.0fs\n", total_time/1000000);
 
     if ( cache_man )
       FTC_Manager_Done( cache_man );
diff --git a/src/tools/ftbench/src/tohtml.py b/src/tools/ftbench/src/tohtml.py
deleted file mode 100644
index 0917b203b..000000000
--- a/src/tools/ftbench/src/tohtml.py
+++ /dev/null
@@ -1,292 +0,0 @@
-"""This script generates a HTML file from the results of ftbench"""
-# Ahmet Goksu ahmet@goksu.in ahmet.goksu.in
-
-import os
-import re
-import sys
-
-PROJECT_ROOT = sys.argv[1]
-BENCHMARK_HTML = os.path.join(PROJECT_ROOT, "benchmark.html")
-GITLAB_URL = "https://gitlab.freedesktop.org/freetype/freetype/-/commit/"
-CSS_STYLE = """
-
-"""
-BASELINE_DIR = os.path.join(PROJECT_ROOT, "baseline")
-BENCHMARK_DIR = os.path.join(PROJECT_ROOT, "benchmark")
-
-def main():
-    """Entry point for the script"""
-    with open(BENCHMARK_HTML, "w") as html_file:
-        write_to_html(html_file, "<html>\n<head>\n")
-        write_to_html(html_file, CSS_STYLE)
-        write_to_html(html_file, "</head>\n<body>\n")
-        write_to_html(html_file, "<h1>Freetype Benchmark Results</h1>\n")
\n") - - baseline_info = parse_info_file(os.path.join(BASELINE_DIR, "info.txt")) - benchmark_info = parse_info_file(os.path.join(BENCHMARK_DIR, "info.txt")) - - if baseline_info[1].strip() == benchmark_info[1].strip(): - write_to_html( - html_file, - '

Warning: Baseline and Benchmark have the same commit ID!

\n', - ) - - generate_info_table(html_file, baseline_info, benchmark_info) - - # Generate total results table - generate_total_results_table(html_file, BASELINE_DIR, BENCHMARK_DIR) - - # Generate results tables - for filename in os.listdir(BASELINE_DIR): - if filename.endswith(".txt") and not filename == "info.txt": - baseline_results = read_file(os.path.join(BASELINE_DIR, filename)) - benchmark_results = read_file(os.path.join(BENCHMARK_DIR, filename)) - - generate_results_table( - html_file, baseline_results, benchmark_results, filename - ) - - - write_to_html(html_file, "
Freetype Benchmark
\n") - write_to_html(html_file, "\n\n") - -def write_to_html(html_file, content): - """Write content to html file""" - html_file.write(content) - - -def read_file(file_path): - """Read file and return list of lines""" - with open(file_path, "r") as f: - return f.readlines() - - -def parse_info_file(info_file): - """Get info from info.txt file and return as list""" - info = read_file(info_file) - info[1] = '{}\n'.format(GITLAB_URL, info[1].strip(), info[1][:8]) - return info - - -def generate_info_table(html_file, baseline_info, benchmark_info): - """Prepare info table for html""" - write_to_html(html_file, "

Info

\n") - write_to_html(html_file, '\n') - write_to_html( - html_file, "\n" - ) - info_list = ["Parameters", "Commit ID", "Commit Date", "Branch"] - for info, baseline_line, benchmark_line in zip( - info_list, baseline_info, benchmark_info - ): - write_to_html( - html_file, - '\n'.format( - info, baseline_line.strip(), benchmark_line.strip() - ), - ) - write_to_html(html_file, "
InfoBaselineBenchmark
{}{}{}

") - write_to_html(html_file, "

* Average time for single iteration. Smaller values are better.

") - write_to_html(html_file, "

** N count in (x | y) format is for showing baseline and benchmark N counts seperately when they differs.

") - - -def generate_total_results_table(html_file, baseline_dir, benchmark_dir): - """Prepare total results table for html""" - - # This dictionary will store aggregated results. - test_results = {test: {"baseline": 0, "benchmark": 0, "n_baseline": 0, "n_benchmark": 0} for test in [ - "Load", "Load_Advances (Normal)", "Load_Advances (Fast)", "Load_Advances (Unscaled)", "Render", - "Get_Glyph", "Get_Char_Index", "Iterate CMap", "New_Face", "Embolden", "Stroke", "Get_BBox", - "Get_CBox", "New_Face & load glyph(s)" - ]} - - for filename in os.listdir(baseline_dir): - if filename.endswith(".txt") and not filename == "info.txt": - - baseline_results = read_file(os.path.join(baseline_dir, filename)) - benchmark_results = read_file(os.path.join(benchmark_dir, filename)) - - for baseline_line, benchmark_line in zip(baseline_results, benchmark_results): - if baseline_line.startswith(" "): - baseline_match = re.match(r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line) - benchmark_match = re.match(r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", benchmark_line) - - if baseline_match and benchmark_match: - test = baseline_match.group(1).strip() - baseline_value = float(baseline_match.group(2)) - benchmark_value = float(benchmark_match.group(2)) - baseline_n = int(baseline_match.group(3)) - benchmark_n = int(benchmark_match.group(3)) - - # Aggregate the results - if test in test_results: - test_results[test]["baseline"] += baseline_value - test_results[test]["benchmark"] += benchmark_value - test_results[test]["n_baseline"] += baseline_n - test_results[test]["n_benchmark"] += benchmark_n - - # Writing to HTML - write_to_html(html_file, "

Total Results

\n") - write_to_html(html_file, '\n') - write_to_html( - html_file, - '\ - \n' - ) - - total_baseline = total_benchmark = total_diff = total_n_baseline = total_n_benchmark = 0 - - for test, values in test_results.items(): - baseline = values["baseline"] - benchmark = values["benchmark"] - n_baseline = values["n_baseline"] - n_benchmark = values["n_benchmark"] - - n_display = f"{n_baseline} | {n_benchmark}" if n_baseline != n_benchmark else str(n_baseline) - - diff = ((baseline - benchmark) / baseline) * 100 - - # Calculate for total row - total_baseline += baseline - total_benchmark += benchmark - total_n_baseline += n_baseline - total_n_benchmark += n_benchmark - - # Check which value is smaller for color highlighting - baseline_color = "highlight" if baseline <= benchmark else "" - benchmark_color = "highlight" if benchmark <= baseline else "" - - write_to_html( - html_file, - f'\ - \ - \n' - ) - - total_diff = ((total_baseline - total_benchmark) / total_baseline) * 100 - total_n_display = f"{total_n_baseline} | {total_n_benchmark}" if total_n_baseline != total_n_benchmark else str(total_n_baseline) - - write_to_html( - html_file, - f'\ - \ - \n' - ) - - write_to_html(html_file, "
TestNBaseline (µs)Benchmark (µs)Difference (%)
{test}{n_display}{baseline:.1f}{benchmark:.1f}{diff:.1f}
TOTAL{total_n_display}{total_baseline:.1f}{total_benchmark:.1f}{total_diff:.1f}

\n") - - - -def generate_results_table(html_file, baseline_results, benchmark_results, filename): - """Prepare results table for html""" - fontname = [ - line.split("/")[-1].strip("'")[:-2] - for line in baseline_results - if line.startswith("ftbench results for font") - ][0] - - write_to_html(html_file, "

Results for {}

\n".format(fontname)) - write_to_html(html_file, '\n') - write_to_html( - html_file, - '\ - \ - \ - \n'.format( - os.path.join(BASELINE_DIR, filename[:-4]), - os.path.join(BENCHMARK_DIR, filename[:-4]), - ), - ) - - total_n = total_time_baseline = total_time_benchmark = total_difference = 0 - - for baseline_line, benchmark_line in zip(baseline_results, benchmark_results): - if baseline_line.startswith(" "): - baseline_match = re.match( - r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line - ) - benchmark_match = re.match( - r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", benchmark_line - ) - - if baseline_match and benchmark_match: - baseline_value = float(baseline_match.group(2)) - benchmark_value = float(benchmark_match.group(2)) - - percentage_diff = ( - (baseline_value - benchmark_value) / baseline_value - ) * 100 - - baseline_n = baseline_match.group(3) - benchmark_n = benchmark_match.group(3) - - n = ( - baseline_n - if baseline_n == benchmark_n - else baseline_n + " | " + benchmark_n - ) - - total_n += int(baseline_n) - total_n += int(benchmark_n) - total_time_baseline += baseline_value - total_time_benchmark += benchmark_value - - - if baseline_value > benchmark_value: - write_to_html( - html_file, - '\ - \n'.format( - baseline_match.group(1), - n, - baseline_value, - benchmark_value, - percentage_diff, - ), - ) - else: - write_to_html( - html_file, - '\ - \n'.format( - baseline_match.group(1), - n, - baseline_value, - benchmark_value, - percentage_diff, - ), - ) - - write_to_html( - html_file, - '\ - \n'.format( - total_n, total_time_baseline, total_time_benchmark, (total_time_baseline - total_time_benchmark) / total_time_baseline * -100 - ), - ) - write_to_html(html_file, "
TestN* Baseline (µs)* Benchmark (µs)Difference (%)
{}{}{:.1f}{:.1f}{:.1f}
{}{}{:.1f}{:.1f}{:.1f}
TOTAL{}{:.1f}{:.1f}{:.1f}

\n") - -if __name__ == "__main__": - main()