Add node and php to benchmark dashboard

pull/5186/head
Yilun Chong 6 years ago
parent 4426cb5733
commit cee0447fd9
Changed files:
1. benchmarks/Makefile.am (3 lines changed)
2. benchmarks/js/js_benchmark.js (16 lines changed)
3. benchmarks/php/PhpBenchmark.php (20 lines changed)
4. benchmarks/python/py_benchmark.py (21 lines changed)
5. benchmarks/util/result_parser.py (48 lines changed)
6. benchmarks/util/result_uploader.py (29 lines changed)
7. kokoro/linux/benchmark/build.sh (24 lines changed)

@@ -260,7 +260,7 @@ go-benchmark: go_protoc_middleman
@echo 'all_data=""' >> go-benchmark
@echo 'conf=()' >> go-benchmark
@echo 'data_files=()' >> go-benchmark
@echo 'for arg in $$@; do if [[ $${arg:0:1} == "-" ]]; then conf+=($$arg); else data_files+=("../$$arg"); fi; done' >> go-benchmark
@echo 'for arg in $$@; do if [[ $${arg:0:1} == "-" ]]; then conf+=($$arg); else data_files+=("$$arg"); fi; done' >> go-benchmark
@echo 'go test -bench=. $${conf[*]} -- $${data_files[*]}' >> go-benchmark
@echo 'cd ..' >> go-benchmark
@chmod +x go-benchmark
@@ -533,7 +533,6 @@ php-c-benchmark: proto3_middleman_php generate_proto3_data php_c_extension php_c
@echo '#! /bin/bash' > php-c-benchmark
@echo 'export PROTOBUF_PHP_SRCDIR="$$(cd $(top_srcdir) && pwd)/php/src"' >> php-c-benchmark
@echo 'export PROTOBUF_PHP_EXTDIR="$$PROTOBUF_PHP_SRCDIR/../ext/google/protobuf/modules"' >> php-c-benchmark
@echo 'echo "$$PROTOBUF_PHP_EXTDIR/protobuf.so"' >> php-c-benchmark
@echo 'cd tmp/php' >> php-c-benchmark
@echo 'export CURRENT_DIR=$$(pwd)' >> php-c-benchmark
@echo 'php -d auto_prepend_file="autoload.php" -d include_path="$$(pwd)" -d extension="$$PROTOBUF_PHP_EXTDIR/protobuf.so" Google/Protobuf/Benchmark/PhpBenchmark.php $$@' >> php-c-benchmark

@@ -18,6 +18,7 @@ function getNewPrototype(name) {
}
var results = [];
var json_file = "";
console.log("#####################################################");
console.log("Js Benchmark: ");
@@ -25,6 +26,11 @@ process.argv.forEach(function(filename, index) {
if (index < 2) {
return;
}
if (filename.indexOf("--json_output") != -1) {
json_file = filename.replace(/^--json_output=/, '');
return;
}
var benchmarkDataset =
proto.benchmarks.BenchmarkDataset.deserializeBinary(fs.readFileSync(filename));
var messageList = [];
@@ -55,8 +61,8 @@ process.argv.forEach(function(filename, index) {
results.push({
filename: filename,
benchmarks: {
protobufjs_decoding: senarios.benches[0] * totalBytes,
protobufjs_encoding: senarios.benches[1] * totalBytes
protobufjs_decoding: senarios.benches[0] * totalBytes / 1024 / 1024,
protobufjs_encoding: senarios.benches[1] * totalBytes / 1024 / 1024
}
})
@@ -68,3 +74,9 @@ process.argv.forEach(function(filename, index) {
});
console.log("#####################################################");
if (json_file != "") {
fs.writeFile(json_file, JSON.stringify(results), (err) => {
if (err) throw err;
});
}
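
For reference, the new JS numbers are throughput: assuming each bench value counts full passes over the dataset per second, multiplying by the summed payload size and scaling by 1024*1024 yields MB/s. A minimal Python sketch of the same conversion, with made-up numbers:

# A minimal sketch of the JS conversion above, assuming `ops_per_sec` counts
# full passes over the dataset per second (as the protobufjs benches appear to).
def to_mb_per_sec(ops_per_sec, total_bytes):
    return ops_per_sec * total_bytes / 1024.0 / 1024.0   # bytes/s scaled to MB/s

# e.g. 50 passes/s over a 2 MiB dataset -> 100.0 MB/s
print(to_mb_per_sec(50, 2 * 1024 * 1024))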

@@ -93,7 +93,7 @@ function getMessageName(&$dataset) {
}
}
function runBenchmark($file) {
function runBenchmark($file, $behavior_prefix) {
$datafile = fopen($file, "r") or die("Unable to open file " . $file);
$bytes = fread($datafile, filesize($file));
$dataset = new BenchmarkDataset(NULL);
@@ -119,8 +119,8 @@ function runBenchmark($file) {
return array(
"filename" => $file,
"benchmarks" => array(
"parse_php" => $parse_benchmark->runBenchmark(),
"serailize_php" => $serialize_benchmark->runBenchmark()
$behavior_prefix . "_parse" => $parse_benchmark->runBenchmark(),
$behavior_prefix . "_serailize" => $serialize_benchmark->runBenchmark()
),
"message_name" => $dataset->getMessageName()
);
@@ -129,15 +129,27 @@ function runBenchmark($file) {
// main
$json_output = false;
$results = array();
$behavior_prefix = "";
foreach ($argv as $index => $arg) {
if ($index == 0) {
continue;
}
if ($arg == "--json") {
$json_output = true;
} else if (strpos($arg, "--behavior_prefix") === 0) {
$behavior_prefix = str_replace("--behavior_prefix=", "", $arg);
}
}
foreach ($argv as $index => $arg) {
if ($index == 0) {
continue;
}
if (substr($arg, 0, 2) == "--") {
continue;
} else {
array_push($results, runBenchmark($arg));
array_push($results, runBenchmark($arg, $behavior_prefix));
}
}
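
With the prefix threaded through, each element pushed onto $results should look roughly like the record below, shown as a Python dict purely for comparison with the parser later in this commit; the path, message name, and numbers are invented:

# Hypothetical example of one PHP result record after the --behavior_prefix change.
record = {
    "filename": "tmp/proto3_data/dataset.google_message1_proto3.pb",  # invented path
    "benchmarks": {
        "php_parse": 120.5,      # value returned by $parse_benchmark->runBenchmark()
        "php_serialize": 98.3,   # value returned by $serialize_benchmark->runBenchmark()
    },
    "message_name": "benchmarks.proto3.GoogleMessage1",               # invented name
}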

@@ -44,9 +44,13 @@ def run_one_test(filename):
data = open(filename).read()
benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
benchmark_dataset.ParseFromString(data)
total_bytes = 0
for payload in benchmark_dataset.payload:
total_bytes += len(payload)
benchmark_util = Benchmark(full_iteration=len(benchmark_dataset.payload),
module="py_benchmark",
setup_method="init")
setup_method="init",
total_bytes=total_bytes)
result={}
result["filename"] = filename
result["message_name"] = benchmark_dataset.message_name
@@ -61,10 +65,11 @@ def run_one_test(filename):
def init(filename):
global benchmark_dataset, message_class, message_list, counter
global benchmark_dataset, message_class, message_list, counter, total_bytes
message_list=[]
counter = 0
data = open(os.path.dirname(sys.argv[0]) + "/../" + filename).read()
total_bytes = 0
data = open(filename).read()
benchmark_dataset = benchmarks_pb2.BenchmarkDataset()
benchmark_dataset.ParseFromString(data)
@@ -85,6 +90,7 @@ def init(filename):
temp = message_class()
temp.ParseFromString(one_payload)
message_list.append(temp)
total_bytes += len(one_payload)
def parse_from_benchmark():
@@ -101,11 +107,12 @@ def serialize_to_benchmark():
class Benchmark:
def __init__(self, module=None, test_method=None,
setup_method=None, full_iteration = 1):
setup_method=None, total_bytes=None, full_iteration = 1):
self.full_iteration = full_iteration
self.module = module
self.test_method = test_method
self.setup_method = setup_method
self.total_bytes = total_bytes
def set_test_method(self, test_method):
self.test_method = test_method
@@ -130,7 +137,7 @@ class Benchmark:
t = timeit.timeit(stmt="%s(%s)" % (self.test_method, test_method_args),
setup=self.full_setup_code(setup_method_args),
number=reps);
return 1.0 * t / reps * (10 ** 9)
return self.total_bytes * 1.0 / 2 ** 20 / (1.0 * t / reps)
if __name__ == "__main__":
@@ -144,10 +151,10 @@ if __name__ == "__main__":
for result in results:
print("Message %s of dataset file %s" % \
(result["message_name"], result["filename"]))
print("Average time for parse_from_benchmark: %.2f ns" % \
print("Average throughput for parse_from_benchmark: %.2f MB/s" % \
(result["benchmarks"][ \
args.behavior_prefix + "_parse_from_benchmark"]))
print("Average time for serialize_to_benchmark: %.2f ns" % \
print("Average throughput for serialize_to_benchmark: %.2f MB/s" % \
(result["benchmarks"][ \
args.behavior_prefix + "_serialize_to_benchmark"]))
print("")

@@ -115,7 +115,6 @@ def __parse_synthetic_result(filename):
# behavior: results,
# ...
# },
# "message_name": STRING
# },
# ...
# ], #pure-python
@@ -136,8 +135,7 @@ def __parse_python_result(filename):
"language": "python",
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": avg_size /
result["benchmarks"][behavior] * 1e9 / 2 ** 20
"throughput": result["benchmarks"][behavior]
})
@@ -220,7 +218,7 @@ def __parse_go_result(filename):
continue
first_slash_index = result_list[0].find('/')
last_slash_index = result_list[0].rfind('/')
full_filename = result_list[0][first_slash_index+4:last_slash_index] # delete ../ prefix
full_filename = result_list[0][first_slash_index+1:last_slash_index]
total_bytes, _ = __get_data_size(full_filename)
behavior_with_suffix = result_list[0][last_slash_index+1:]
last_dash = behavior_with_suffix.rfind("-")
@@ -236,11 +234,45 @@ def __parse_go_result(filename):
})
# Node/Php results example:
#
# [
# {
# "filename": string,
# "benchmarks": {
# behavior: results,
# ...
# },
# },
# ...
# ]
def __parse_js_php_result(filename, language):
if filename == "":
return
if filename[0] != '/':
filename = os.path.dirname(os.path.abspath(__file__)) + '/' + filename
with open(filename) as f:
results = json.loads(f.read())
for result in results:
_, avg_size = __get_data_size(result["filename"])
for behavior in result["benchmarks"]:
__results.append({
"language": language,
"dataFilename": __extract_file_name(result["filename"]),
"behavior": behavior,
"throughput": result["benchmarks"][behavior]
})
def get_result_from_file(cpp_file="",
java_file="",
python_file="",
go_file="",
synthetic_file=""):
synthetic_file="",
node_file="",
php_c_file="",
php_file=""):
results = {}
if cpp_file != "":
__parse_cpp_result(cpp_file)
@@ -252,5 +284,11 @@ def get_result_from_file(cpp_file="",
__parse_go_result(go_file)
if synthetic_file != "":
__parse_synthetic_result(synthetic_file)
if node_file != "":
__parse_js_php_result(node_file, "node")
if php_file != "":
__parse_js_php_result(php_file, "php")
if php_c_file != "":
__parse_js_php_result(php_c_file, "php")
return __results
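
A hedged usage sketch of the extended entry point, assuming it is invoked from the benchmarks directory the way result_uploader is; the result paths mirror the ones the build script passes below and are otherwise assumptions:

# Illustrative only: how the new keyword arguments might be used together.
from util import result_parser   # import path assumes running from benchmarks/

results = result_parser.get_result_from_file(
    cpp_file="../tmp/cpp_result.json",
    java_file="../tmp/java_result.json",
    python_file="../tmp/python_result.json",
    go_file="../tmp/go_result.txt",
    node_file="../tmp/node_result.json",
    php_file="../tmp/php_result.json",
    php_c_file="../tmp/php_c_result.json",
)
# each entry: {"language": ..., "dataFilename": ..., "behavior": ..., "throughput": ...}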

@@ -59,13 +59,14 @@ def upload_result(result_list, metadata):
labels_string += ",|%s:%s|" % (key, result[key])
new_result["labels"] = labels_string[1:]
new_result["timestamp"] = _INITIAL_TIME
bq = big_query_utils.create_big_query()
row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
_TABLE + "$" + _NOW,
[row]):
print('Error when uploading result', new_result)
print(labels_string)
#
# bq = big_query_utils.create_big_query()
# row = big_query_utils.make_row(str(uuid.uuid4()), new_result)
# if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET,
# _TABLE + "$" + _NOW,
# [row]):
# print('Error when uploading result', new_result)
if __name__ == "__main__":
@@ -82,6 +83,15 @@ if __name__ == "__main__":
parser.add_argument("-go", "--go_input_file",
help="The golang benchmark result file's name",
default="")
parser.add_argument("-node", "--node_input_file",
help="The node.js benchmark result file's name",
default="")
parser.add_argument("-php", "--php_input_file",
help="The pure php benchmark result file's name",
default="")
parser.add_argument("-php_c", "--php_c_input_file",
help="The php with c ext benchmark result file's name",
default="")
args = parser.parse_args()
metadata = get_metadata()
@@ -90,5 +100,8 @@ if __name__ == "__main__":
cpp_file=args.cpp_input_file,
java_file=args.java_input_file,
python_file=args.python_input_file,
go_file=args.go_input_file
go_file=args.go_input_file,
node_file=args.node_input_file,
php_file=args.php_input_file,
php_c_file=args.php_c_input_file,
), metadata)
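
For reference, the labels string that now just gets printed (the BigQuery insert being commented out) chains result fields together as |key:value| segments. A tiny sketch of that construction with invented values; which fields actually end up in the labels is not shown in this hunk, so the key set here is hypothetical:

# Hypothetical values; mirrors the labels_string construction in upload_result.
result = {"language": "node",
          "dataFilename": "google_message1_proto3",
          "behavior": "protobufjs_decoding"}
labels_string = ""
for key in result:
    labels_string += ",|%s:%s|" % (key, result[key])
print(labels_string[1:])
# -> |language:node|,|dataFilename:google_message1_proto3|,|behavior:protobufjs_decoding|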

@@ -19,7 +19,8 @@ fi
# download datasets for benchmark
cd benchmarks
./download_data.sh
datasets=`find . -type f -name "dataset.*.pb"`
datasets=$(for file in $(find . -type f -name "dataset.*.pb" -not -path "./tmp/*"); do echo "$(pwd)/$file"; done | xargs)
echo $datasets
cd $oldpwd
# build Python protobuf
@@ -84,9 +85,24 @@ make java-benchmark
echo "benchmarking java..."
./java-benchmark -Cresults.file.options.file="tmp/java_result.json" $datasets
make js-benchmark
echo "benchmarking js..."
./js-benchmark $datasets --json_output=$(pwd)/tmp/node_result.json
make -j8 generate_proto3_data
proto3_datasets=$(for file in $datasets; do echo $(pwd)/tmp/proto3_data/${file#$(pwd)}; done | xargs)
echo $proto3_datasets
# build php benchmark
make -j8 php-benchmark
echo "benchmarking php..."
./php-benchmark $proto3_datasets --json --behavior_prefix="php" > tmp/php_result.json
make -j8 php-c-benchmark
echo "benchmarking php_c..."
./php-c-benchmark $proto3_datasets --json --behavior_prefix="php_c" > tmp/php_c_result.json
# upload result to bq
make python_add_init
env LD_LIBRARY_PATH="$oldpwd/src/.libs" python -m util.result_uploader -cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" \
-python="../tmp/python_result.json" -go="../tmp/go_result.txt"
env LD_LIBRARY_PATH="$oldpwd/src/.libs" python -m util.result_uploader -php="../tmp/php_result.json" -php_c="../tmp/php_c_result.json" \
-cpp="../tmp/cpp_result.json" -java="../tmp/java_result.json" -go="../tmp/go_result.txt" -python="../tmp/python_result.json" -node="../tmp/node_result.json"
cd $oldpwd
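
The proto3_datasets line above re-roots each absolute dataset path under tmp/proto3_data (presumably where generate_proto3_data writes the proto3 copies) by stripping the $(pwd) prefix. A Python sketch of the same path mapping, with an invented working directory:

# Sketch of the ${file#$(pwd)} re-rooting in build.sh; the paths are invented.
benchmarks_dir = "/work/protobuf/benchmarks"                  # stands in for $(pwd)
dataset = benchmarks_dir + "/./datasets/google_message1/proto3/dataset.google_message1_proto3.pb"

relative = dataset[len(benchmarks_dir):]                      # "${file#$(pwd)}"
proto3_dataset = benchmarks_dir + "/tmp/proto3_data" + relative
print(proto3_dataset)
# -> /work/protobuf/benchmarks/tmp/proto3_data/./datasets/google_message1/proto3/dataset.google_message1_proto3.pb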
