This commit creates: testing.mk (also stubbed into freetype.mk), tohtml.py, ftbench.c as the version from freetype-demos, and the test fonts
gsoc-2023-ahmet-final
parent c4073d8251
commit 96c4dc98d3
11 changed files with 2196 additions and 0 deletions
@@ -0,0 +1,173 @@
# Define a few important files and directories.
FTBENCH_DIR = $(TOP_DIR)/src/tools/ftbench
FTBENCH_SRC = $(FTBENCH_DIR)/ftbench.c
FTBENCH_OBJ = $(OBJ_DIR)/bench.$(SO)
FTBENCH_BIN = $(OBJ_DIR)/bench$E
INCLUDES = $(TOP_DIR)/include
FONTS = $(wildcard $(FTBENCH_DIR)/fonts/*.ttf)

# Define objects.
BASELINE_DIR = $(OBJ_DIR)/baseline/
BENCHMARK_DIR = $(OBJ_DIR)/benchmark/
BASELINE_INFO = $(BASELINE_DIR)info.txt
BENCHMARK_INFO = $(BENCHMARK_DIR)info.txt
HTMLCREATOR_SRC = $(FTBENCH_DIR)/src/tohtml.py
HTMLCREATOR = $(OBJ_DIR)/tohtml.py
HTMLFILE = $(OBJ_DIR)/benchmark.html

# Define flags; create default values in case the user does not supply them.
FTBENCH_FLAG ?= -c 1000 -w 100

# Define all test fonts in the fonts folder.
BASELINE = $(addprefix $(BASELINE_DIR), $(notdir $(FONTS:.ttf=.txt)))
BENCHMARK = $(addprefix $(BENCHMARK_DIR), $(notdir $(FONTS:.ttf=.txt)))

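# For example, with a hypothetical fonts directory containing arial.ttf
# and times.ttf (illustrative names only), the substitutions above
# expand to
#
#   BASELINE  = $(OBJ_DIR)/baseline/arial.txt $(OBJ_DIR)/baseline/times.txt
#   BENCHMARK = $(OBJ_DIR)/benchmark/arial.txt $(OBJ_DIR)/benchmark/times.txt
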
# Define the include paths needed to compile ftbench.c.
FT_INCLUDES := $(OBJ_BUILD) \
               $(INCLUDES)

# Define the libraries needed to compile ftbench.c.
FTLIB := $(LIB_DIR)/$(LIBRARY).$A

# How to compile ftbench.c.
COMPILE = $(CC) $(ANSIFLAGS) \
          $(INCLUDES:%=$I%) \
          $(CFLAGS)

INCLUDES := $(subst /,$(COMPILER_SEP),$(FT_INCLUDES))

# Enable C99 for gcc to avoid warnings.
# Note that clang++ aborts with an error if we use `-std=C99',
# so check for `++' in $(CC) also.
ifneq ($(findstring -pedantic,$(COMPILE)),)
  ifeq ($(findstring ++,$(CC)),)
    COMPILE += -std=c99
  endif
endif

# Decide how to link ftbench.c, depending on the platform.
ifeq ($(PLATFORM),unix)
  # `LDFLAGS` comes from the `configure` script (via FreeType's
  # `builds/unix/unix-cc.mk`), holding all linker flags necessary to
  # link the FreeType library.
  LINK_CMD = $(LIBTOOL) --mode=link $(CCraw) \
             $(subst /,$(COMPILER_SEP),$(LDFLAGS))
  LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
else
  LINK_CMD = $(CC) $(subst /,$(COMPILER_SEP),$(LDFLAGS))
  ifeq ($(PLATFORM),unixdev)
    # For the pure `make` call (without using `configure`) we have to add
    # all needed libraries manually.
    LINK_LIBS := $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE)) \
                 -lm -lrt -lz -lbz2 -lpthread
    LINK_LIBS += $(shell pkg-config --libs libpng)
    LINK_LIBS += $(shell pkg-config --libs harfbuzz)
    LINK_LIBS += $(shell pkg-config --libs libbrotlidec)
    LINK_LIBS += $(shell pkg-config --libs librsvg-2.0)
  else
    LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
  endif
endif

# Only on Windows might we fall back on GDI+ for PNG saving.
ifeq ($(OS),Windows_NT)
  LINK_LIBS += -lgdiplus
endif

####################################################################
#
# POSIX TERMIOS: Do not define if you use OLD U*ix like 4.2BSD.
#
ifeq ($(PLATFORM),unix)
  EXTRAFLAGS = $DUNIX $DHAVE_POSIX_TERMIOS
endif

ifeq ($(PLATFORM),unixdev)
  EXTRAFLAGS = $DUNIX $DHAVE_POSIX_TERMIOS
endif


# Create directories for baseline and benchmark.
$(BASELINE_DIR) $(BENCHMARK_DIR):
	@mkdir -p $@

# Create the ftbench object.
$(FTBENCH_OBJ): $(FTBENCH_SRC)
	@$(COMPILE) $T$(subst /,$(COMPILER_SEP),$@ $<) $(EXTRAFLAGS)
	@echo "Object created."

# Build ftbench.
$(FTBENCH_BIN): $(FTBENCH_OBJ)
	@echo "Linking ftbench..."
	@$(LINK_CMD) $T$(subst /,$(COMPILER_SEP),$@ $<) $(LINK_LIBS)
	@echo "Built."

# Copy the tohtml.py script into the objs folder.
.PHONY: copy-html-script
copy-html-script:
	@cp $(HTMLCREATOR_SRC) $(OBJ_DIR)
	@echo "Copied tohtml.py to $(OBJ_DIR)"

####################################################################
#
# Create the baseline:
#   Flags, commit hash, commit date, and branch name are written to info.txt.
#   All fonts are tested by ftbench; results go into the related .txt files.
#
.PHONY: baseline
baseline: $(FTBENCH_BIN) $(BASELINE_DIR)
	@$(RM) -f $(BASELINE)
	@echo "Creating baseline..."
	@echo "$(FTBENCH_FLAG)" > $(BASELINE_INFO)
	@echo "`git -C $(TOP_DIR) rev-parse HEAD`" >> $(BASELINE_INFO)
	@echo "`git -C $(TOP_DIR) show -s --format=%ci HEAD`" >> $(BASELINE_INFO)
	@echo "`git -C $(TOP_DIR) rev-parse --abbrev-ref HEAD`" >> $(BASELINE_INFO)
	@fonts=($(FONTS)); \
	total_fonts=$${#fonts[@]}; \
	step=0; \
	for font in $${fonts[@]}; do \
	  step=$$((step+1)); \
	  percent=$$((step * 100 / total_fonts)); \
	  printf "\nProcessing %d%%..." $$percent; \
	  $(FTBENCH_BIN) $(FTBENCH_FLAG) "$$font" > $(BASELINE_DIR)$$(basename $$font .ttf).txt; \
	done
	@echo "Baseline created."

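# Note: the progress loop above (and the identical one in `benchmark'
# below) uses bash-style arrays ($${#fonts[@]}), so these recipes assume
# that make invokes a bash-compatible SHELL.
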
####################################################################
#
# Create the benchmark:
#   Flags, commit hash, commit date, and branch name are written to info.txt.
#   All fonts are tested by ftbench; results go into the related .txt files.
#   The result page is created by tohtml.py.
#
.PHONY: benchmark
benchmark: $(FTBENCH_BIN) $(BENCHMARK_DIR) copy-html-script
	@$(RM) -f $(BENCHMARK) $(HTMLFILE)
	@echo "Creating benchmark..."
	@echo "$(FTBENCH_FLAG)" > $(BENCHMARK_INFO)
	@echo "`git -C $(TOP_DIR) rev-parse HEAD`" >> $(BENCHMARK_INFO)
	@echo "`git -C $(TOP_DIR) show -s --format=%ci HEAD`" >> $(BENCHMARK_INFO)
	@echo "`git -C $(TOP_DIR) rev-parse --abbrev-ref HEAD`" >> $(BENCHMARK_INFO)
	@fonts=($(FONTS)); \
	total_fonts=$${#fonts[@]}; \
	step=0; \
	for font in $${fonts[@]}; do \
	  step=$$((step+1)); \
	  percent=$$((step * 100 / total_fonts)); \
	  printf "\nProcessing %d%%..." $$percent; \
	  $(FTBENCH_BIN) $(FTBENCH_FLAG) "$$font" > $(BENCHMARK_DIR)$$(basename $$font .ttf).txt; \
	done
	@$(PYTHON) $(HTMLCREATOR) $(OBJ_DIR)
	@echo "Benchmark results created in file: $(HTMLFILE)"

####################################################################
#
# Clean benchmark:
#   Remove everything the baseline and benchmark targets created.
#
.PHONY: clean-benchmark
clean-benchmark:
	@echo "Cleaning..."
	@$(RM) $(FTBENCH_BIN) $(FTBENCH_OBJ)
	@$(RM) -rf $(BASELINE_DIR) $(BENCHMARK_DIR) $(HTMLFILE) $(HTMLCREATOR)
	@echo "Cleaned."
@@ -0,0 +1,45 @@
ftbench
=======

ftbench is a program designed to run FreeType benchmarks.  It accepts
various options and a font name to run specific tests on font rendering
operations.

Each test may involve tasks such as:

. Initializing the library
. Opening the font file
. Loading and optionally rendering each glyph
. Comparing results with cached versions (if available)
. Configuring specific charmap indices, load flags, etc.

Each test is time-limited by default, or can be explicitly set to use a
maximum number of iterations per test.


Command line options
--------------------

  -C        Compare with cached version (if available).
  -c N      Use at most N iterations for each test (0 means time-limited).
  -e E      Set specific charmap index E.
  -f L      Use hex number L as load flags (see `FT_LOAD_XXX').
  -H NAME   Use PS hinting engine NAME (default is `adobe').
  -I VER    Use TT interpreter version VER (default is version 40).
  -i I-J    Forward or reverse range of glyph indices to use.
  -l N      Set LCD filter to N (default is 0: none).
  -m M      Set maximum cache size to M KiByte (default is 1024).
  -p        Preload font file in memory.
  -r N      Set render mode to N (default is 0: normal).
  -s S      Use S ppem as face size (default is 10ppem).
  -t T      Use at most T seconds per bench (default is 2).
  -w N      Use N iterations for warming up before each test.

  -b tests  Perform chosen tests (default is all).
  -v        Show version.

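For example, assuming the binary has been built as objs/bench by the
Makefile above (the font path is illustrative), the following call runs
at most 1000 iterations per test with 100 warm-up iterations, matching
the default FTBENCH_FLAG:

  objs/bench -c 1000 -w 100 src/tools/ftbench/fonts/somefont.ttf
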
Compilation
-----------

make baseline         Create a baseline for your benchmarks.  This
                      compiles ftbench.c and writes a set of baseline
                      measurements into the objs/baseline/ directory.

make benchmark        Run the benchmarks.  The results are stored in
                      the objs/benchmark/ directory; the tohtml.py
                      script is copied to objs/ and an HTML results
                      page is generated.

make clean-benchmark  Remove all generated benchmark files and clean
                      the objs directory.

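A typical comparison workflow (a sketch; it assumes the targets are run
from the FreeType top-level directory):

  make baseline         # measure the current state
  <apply your FreeType changes and rebuild>
  make benchmark        # measure again and generate objs/benchmark.html
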
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because it is too large
@@ -0,0 +1,44 @@
/*
 * This is a cheap replacement for getopt() because that routine is not
 * available on some platforms and behaves differently on other platforms.
 *
 * This code is hereby expressly placed in the public domain.
 * mleisher@crl.nmsu.edu (Mark Leisher)
 * 10 October 1997
 */

#ifndef MLGETOPT_H_
#define MLGETOPT_H_

#ifdef VMS
#include <stdio.h>
#define getopt local_getopt
#define optind local_optind
#define opterr local_opterr
#define optarg local_optarg
#endif

#ifdef __cplusplus
extern "C" {
#endif

extern int opterr;
extern int optind;
extern char* optarg;

extern int getopt(
#ifdef __STDC__
  int argc,
  char* const* argv,
  const char* pattern
#endif
);

#ifdef __cplusplus
}
#endif

#endif /* MLGETOPT_H_ */


/* End */
@@ -0,0 +1,318 @@
"""This script generates an HTML file from the results of ftbench."""
import os
import re
import sys

GITLAB_URL = "https://gitlab.freedesktop.org/freetype/freetype/-/commit/"
CSS_STYLE = """
<style>
    table {
        table-layout: fixed;
    }
    th, td {
        padding: 3px;
        text-align: center;
    }
    th {
        background-color: #ccc;
        color: black;
    }
    .warning {
        color: red;
    }
    .col1 {
        background-color: #eee;
    }
    .highlight {
        background-color: #0a0;
    }
</style>
"""
OBJ_DIR = sys.argv[1]
BASELINE_DIR = os.path.join(OBJ_DIR, "baseline")
BENCHMARK_DIR = os.path.join(OBJ_DIR, "benchmark")
BENCHMARK_HTML = os.path.join(OBJ_DIR, "benchmark.html")

FONT_COUNT = 5

WARNING_SAME_COMMIT = "Warning: Baseline and Benchmark have the same commit ID!"
INFO_1 = "* Average time for single iteration. Smaller values are better."
INFO_2 = (
    "* If a value in the 'Iterations' column is given as '<i>x | y</i>', "
    "values <i>x</i> and <i>y</i> give the number of iterations in the "
    "baseline and the benchmark test, respectively."
)
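

# Typical invocation, mirroring the Makefile's `benchmark' target
# (`$(PYTHON) $(HTMLCREATOR) $(OBJ_DIR)`):
#
#     python tohtml.py objs
#
# where `objs` must contain the baseline/ and benchmark/ directories
# produced by `make baseline` and `make benchmark`.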
def main():
    """Entry point for the script."""
    with open(BENCHMARK_HTML, "w") as html_file:
        write_to_html(html_file, "<html>\n<head>\n")
        write_to_html(html_file, CSS_STYLE)
        write_to_html(html_file, "</head>\n<body>\n")
        write_to_html(html_file, "<h1>FreeType Benchmark Results</h1>\n")

        baseline_info = parse_info_file(os.path.join(BASELINE_DIR, "info.txt"))
        benchmark_info = parse_info_file(os.path.join(BENCHMARK_DIR, "info.txt"))

        if baseline_info[1].strip() == benchmark_info[1].strip():
            write_to_html(
                html_file,
                f'<h2 class="warning">{WARNING_SAME_COMMIT}</h2>\n',
            )

        generate_info_table(html_file, baseline_info, benchmark_info)

        # Generate the total results table.
        generate_total_results_table(html_file, BASELINE_DIR, BENCHMARK_DIR)

        # Generate a results table per font.
        for filename in os.listdir(BASELINE_DIR):
            if filename.endswith(".txt") and not filename == "info.txt":
                baseline_results = read_file(os.path.join(BASELINE_DIR, filename))
                benchmark_results = read_file(os.path.join(BENCHMARK_DIR, filename))

                generate_results_table(
                    html_file, baseline_results, benchmark_results, filename
                )

        write_to_html(html_file, "<center>FreeType Benchmark</center>\n")
        write_to_html(html_file, "</body>\n</html>\n")


def write_to_html(html_file, content):
    """Write content to the html file."""
    html_file.write(content)


def read_file(file_path):
    """Read a file and return its lines as a list."""
    with open(file_path, "r") as f:
        return f.readlines()


def parse_info_file(info_file):
    """Read info.txt and return its lines, linking the commit ID to GitLab."""
    info = read_file(info_file)
    info[1] = f'<a href="{GITLAB_URL}{info[1].strip()}">{info[1][:8]}</a>\n'
    return info
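

# info.txt is written by the Makefile's baseline/benchmark targets and
# holds, in order: the ftbench flags, the commit hash, the commit date,
# and the branch name.  For example (illustrative values):
#
#     -c 1000 -w 100
#     96c4dc98d3...
#     2023-08-21 12:34:56 +0300
#     gsoc-2023-ahmet-final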
def generate_info_table(html_file, baseline_info, benchmark_info):
    """Prepare the info table for the html file."""
    write_to_html(html_file, "<h2>Info</h2>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file, "<tr><th>Info</th><th>Baseline</th><th>Benchmark</th></tr>\n"
    )
    info_list = ["Parameters", "Commit ID", "Commit Date", "Branch"]
    for info, baseline_line, benchmark_line in zip(
        info_list, baseline_info, benchmark_info
    ):
        write_to_html(
            html_file,
            f'<tr><td class="col1">{info}</td><td>{baseline_line.strip()}</td><td>{benchmark_line.strip()}</td></tr>\n',
        )
    write_to_html(html_file, "</table><br/>")
    write_to_html(html_file, f"<p>{INFO_1}</p>")
    write_to_html(html_file, f"<p>{INFO_2}</p>")


def generate_total_results_table(html_file, baseline_dir, benchmark_dir):
    """Prepare the total results table for the html file."""

    # This dictionary will store the aggregated results.
    test_results = {
        test: {"baseline": 0, "benchmark": 0, "n_baseline": 0, "n_benchmark": 0}
        for test in [
            "Load",
            "Load_Advances (Normal)",
            "Load_Advances (Fast)",
            "Load_Advances (Unscaled)",
            "Render",
            "Get_Glyph",
            "Get_Char_Index",
            "Iterate CMap",
            "New_Face",
            "Embolden",
            "Stroke",
            "Get_BBox",
            "Get_CBox",
            "New_Face & load glyph(s)",
        ]
    }

    total_time = 0

    for filename in os.listdir(baseline_dir):
        if filename.endswith(".txt") and not filename == "info.txt":
            baseline_results = read_file(os.path.join(baseline_dir, filename))
            benchmark_results = read_file(os.path.join(benchmark_dir, filename))

            for baseline_line, benchmark_line in zip(
                baseline_results, benchmark_results
            ):
                if baseline_line.startswith("Total time:"):
                    baseline_match = re.match(r"Total time: (\d+)s", baseline_line)
                    benchmark_match = re.match(r"Total time: (\d+)s", benchmark_line)

                    if baseline_match and benchmark_match:
                        total_time += int(baseline_match.group(1))
                        total_time += int(benchmark_match.group(1))

                if baseline_line.startswith(" "):
                    baseline_match = re.match(
                        r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line
                    )
                    benchmark_match = re.match(
                        r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
                        benchmark_line,
                    )
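                    # The pattern assumes ftbench result lines of the shape
                    #
                    #   Load                5.123 microseconds    1000 done
                    #
                    # i.e. test name, average time per iteration, and the
                    # iteration count (the sample line is illustrative, not
                    # verbatim ftbench output).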

                    if baseline_match and benchmark_match:
                        test = baseline_match.group(1).strip()
                        baseline_value = float(baseline_match.group(2))
                        benchmark_value = float(benchmark_match.group(2))
                        baseline_n = int(baseline_match.group(3))
                        benchmark_n = int(benchmark_match.group(3))

                        # Aggregate the results.
                        if test in test_results:
                            test_results[test]["baseline"] += baseline_value
                            test_results[test]["benchmark"] += benchmark_value
                            test_results[test]["n_baseline"] += baseline_n
                            test_results[test]["n_benchmark"] += benchmark_n

    # Write the aggregated table to HTML.
    write_to_html(html_file, "<h2>Total Results</h2>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file,
        "<tr><th>Test</th><th>Iterations</th><th>* Baseline (µs)</th>"
        "<th>* Benchmark (µs)</th><th>Difference (%)</th></tr>\n",
    )

    total_baseline = total_benchmark = total_n_baseline = total_n_benchmark = 0

    for test, values in test_results.items():
        baseline = values["baseline"] / FONT_COUNT
        benchmark = values["benchmark"] / FONT_COUNT
        n_baseline = values["n_baseline"] / FONT_COUNT
        n_benchmark = values["n_benchmark"] / FONT_COUNT

        n_display = (
            f"{n_baseline:.0f} | {n_benchmark:.0f}"
            if n_baseline != n_benchmark
            else int(n_baseline)
        )

        # Guard against division by zero for an empty baseline value.
        diff = ((baseline - benchmark) / baseline) * 100 if baseline != 0 else 0

        # Accumulate for the total row.
        total_baseline += baseline
        total_benchmark += benchmark
        total_n_baseline += n_baseline
        total_n_benchmark += n_benchmark

        # Check which value is smaller for color highlighting.
        baseline_color = "highlight" if baseline <= benchmark else ""
        benchmark_color = "highlight" if benchmark <= baseline else ""

        write_to_html(
            html_file,
            f'<tr><td class="col1">{test}</td><td>{n_display}</td>'
            f'<td class="{baseline_color}">{baseline:.1f}</td>'
            f'<td class="{benchmark_color}">{benchmark:.1f}</td><td>{diff:.1f}</td></tr>\n',
        )

    write_to_html(
        html_file,
        f'<tr><td class="col1">Total duration for all tests:</td>'
        f'<td class="col1" colspan="4">{total_time:.0f} s</td></tr>',
    )

    write_to_html(html_file, "</table>\n")


def generate_results_table(html_file, baseline_results, benchmark_results, filename):
    """Prepare a per-font results table for the html file."""
    fontname = [
        line.split("/")[-1].strip("'")[:-2]
        for line in baseline_results
        if line.startswith("ftbench results for font")
    ][0]

    write_to_html(html_file, f"<h3>Results for {fontname}</h3>\n")
    write_to_html(html_file, '<table border="1">\n')
    write_to_html(
        html_file,
        f'<tr><th>Test</th><th>Iterations</th>'
        f'<th>* <a href="{os.path.join("./baseline/", filename[:-4])}.txt">Baseline</a> (µs)</th>'
        f'<th>* <a href="{os.path.join("./benchmark/", filename[:-4])}.txt">Benchmark</a> (µs)</th>'
        f'<th>Difference (%)</th></tr>\n',
    )

    total_n = total_time = 0

    for baseline_line, benchmark_line in zip(baseline_results, benchmark_results):
        if baseline_line.startswith("Total time:"):
            baseline_match = re.match(r"Total time: (\d+)s", baseline_line)
            benchmark_match = re.match(r"Total time: (\d+)s", benchmark_line)

            if baseline_match and benchmark_match:
                total_time += int(baseline_match.group(1))
                total_time += int(benchmark_match.group(1))

        if baseline_line.startswith(" "):
            baseline_match = re.match(
                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line
            )
            benchmark_match = re.match(
                r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", benchmark_line
            )

            if baseline_match and benchmark_match:
                baseline_value = float(baseline_match.group(2))
                benchmark_value = float(benchmark_match.group(2))

                # Guard against division by zero for a zero baseline value.
                percentage_diff = (
                    ((baseline_value - benchmark_value) / baseline_value) * 100
                    if baseline_value != 0
                    else 0
                )

                baseline_n = baseline_match.group(3)
                benchmark_n = benchmark_match.group(3)

                n = (
                    baseline_n
                    if baseline_n == benchmark_n
                    else baseline_n + " | " + benchmark_n
                )

                total_n += int(baseline_n)
                total_n += int(benchmark_n)

                # Check which value is smaller for color highlighting.
                baseline_color = (
                    "highlight" if baseline_value <= benchmark_value else ""
                )
                benchmark_color = (
                    "highlight" if benchmark_value <= baseline_value else ""
                )

                write_to_html(
                    html_file,
                    f'<tr><td class="col1">{baseline_match.group(1)}</td><td>{n}</td>'
                    f'<td class="{baseline_color}">{baseline_value:.1f}</td>'
                    f'<td class="{benchmark_color}">{benchmark_value:.1f}</td>'
                    f'<td>{percentage_diff:.1f}</td></tr>\n',
                )

    write_to_html(
        html_file,
        f'<tr><td class="col1">Total duration for the font:</td>'
        f'<td class="col1" colspan="4">{total_time:.0f} s</td></tr></table>\n',
    )


if __name__ == "__main__":
    main()