Merge pull request #4158 from BSBandme/FixBenchmarks

Fix java benchmark to use parser, fix cpp benchmark new arena to use …
Yilun Chong, committed by GitHub
commit 9f80df0269
Changed files:

  1. benchmarks/README.md (11 changed lines)
  2. benchmarks/cpp_benchmark.cc (3 changed lines)
  3. benchmarks/java/src/main/java/com/google/protobuf/ProtoBenchCaliper.java (18 changed lines)
  4. benchmarks/py_benchmark.py (4 changed lines)

benchmarks/README.md

@@ -38,14 +38,15 @@ And you also need to make sure `pkg-config` is installed.
 ### Big data
-There's some optional big testing data which is not included in the directory initially, you need to
-run the following command to download the testing data:
+There's some optional big testing data which is not included in the directory
+initially, you need to run the following command to download the testing data:
 ```
 $ ./download_data.sh
 ```
-After doing this the big data file will automaticly generated in the benchmark directory.
+After doing this the big data file will automaticly generated in the
+benchmark directory.
 ## Run instructions
@@ -65,8 +66,8 @@ $ make cpp
 ### Python:
-We have three versions of python protobuf implementation: pure python, cpp reflection and
-cpp generated code. To run these version benchmark, you need to:
+We have three versions of python protobuf implementation: pure python, cpp
+reflection and cpp generated code. To run these version benchmark, you need to:
 #### Pure Python:

benchmarks/cpp_benchmark.cc

@@ -121,9 +121,10 @@ class ParseNewArenaFixture : public Fixture {
   virtual void BenchmarkCase(benchmark::State& state) {
     WrappingCounter i(payloads_.size());
     size_t total = 0;
+    Arena arena;
     while (state.KeepRunning()) {
-      Arena arena;
+      arena.Reset();
       Message* m = Arena::CreateMessage<T>(&arena);
       const std::string& payload = payloads_[i.Next()];
       total += payload.size();
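The C++ hunk above hoists the `Arena` out of the timed loop and calls `Reset()` on it each iteration, so `state.KeepRunning()` measures parsing into arena-allocated messages rather than arena construction and teardown. Java protobuf has no arena, but purely as a hypothetical illustration of the same hoist-and-reset shape (none of the names below come from this PR; `Timestamp` stands in for the benchmarked message type), a reusable builder cleared between passes plays the analogous role:

```java
import com.google.protobuf.ExtensionRegistry;
import com.google.protobuf.Message;
import com.google.protobuf.Timestamp;

// Hypothetical sketch, not part of the PR: the builder stands in for the
// C++ Arena as the piece of reusable state kept outside the timed loop.
public class HoistedStateLoop {
  public static void main(String[] args) throws Exception {
    byte[] payload = Timestamp.newBuilder().setSeconds(42).build().toByteArray();
    ExtensionRegistry extensions = ExtensionRegistry.getEmptyRegistry();

    // Reusable state is created once, outside the measured loop
    // (the counterpart of `Arena arena;` before `while (state.KeepRunning())`).
    Message.Builder builder = Timestamp.getDefaultInstance().newBuilderForType();

    long start = System.nanoTime();
    for (int i = 0; i < 100_000; i++) {
      builder.clear();  // the counterpart of `arena.Reset()`
      Message m = builder.mergeFrom(payload, extensions).build();
      if (m.getSerializedSize() == 0) {
        throw new AssertionError("unexpected empty message");
      }
    }
    System.out.println("ns/op: " + (System.nanoTime() - start) / 100_000);
  }
}
```

The point is only where the per-iteration setup lives: constructing a fresh arena (or any other heavy helper object) inside the loop makes the benchmark time that construction as well as the parse.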

benchmarks/java/src/main/java/com/google/protobuf/ProtoBenchCaliper.java

@@ -153,10 +153,8 @@ public class ProtoBenchCaliper {
   @Benchmark
   void deserializeFromByteString(int reps) throws IOException {
     for (int i = 0; i < reps; i++) {
-      defaultMessage
-          .newBuilderForType()
-          .mergeFrom(inputStringList.get((int) (counter % inputStringList.size())), extensions)
-          .build();
+      benchmarkMessageType.getDefaultInstance().getParserForType().parseFrom(
+          inputStringList.get((int) (counter % inputStringList.size())), extensions);
       counter++;
     }
   }
@@ -164,10 +162,8 @@ public class ProtoBenchCaliper {
   @Benchmark
   void deserializeFromByteArray(int reps) throws IOException {
     for (int i = 0; i < reps; i++) {
-      defaultMessage
-          .newBuilderForType()
-          .mergeFrom(inputDataList.get((int) (counter % inputDataList.size())), extensions)
-          .build();
+      benchmarkMessageType.getDefaultInstance().getParserForType().parseFrom(
+          inputDataList.get((int) (counter % inputDataList.size())), extensions);
       counter++;
     }
   }
@@ -175,10 +171,8 @@ public class ProtoBenchCaliper {
   @Benchmark
   void deserializeFromMemoryStream(int reps) throws IOException {
     for (int i = 0; i < reps; i++) {
-      defaultMessage
-          .newBuilderForType()
-          .mergeFrom(inputStreamList.get((int) (counter % inputStreamList.size())), extensions)
-          .build();
+      benchmarkMessageType.getDefaultInstance().getParserForType().parseFrom(
+          inputStreamList.get((int) (counter % inputStreamList.size())), extensions);
       inputStreamList.get((int) (counter % inputStreamList.size())).reset();
       counter++;
     }
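The Java change above makes the benchmark deserialize through the message's `Parser` instead of going builder, `mergeFrom`, `build()`, so the timed region is the parse itself. Below is a minimal, self-contained sketch of the two call shapes, using `Timestamp` as a stand-in for the benchmark's message types; it is an illustration, not code from the PR:

```java
import com.google.protobuf.ExtensionRegistry;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import com.google.protobuf.Timestamp;

public class ParserVsBuilder {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    byte[] payload = Timestamp.newBuilder().setSeconds(42).build().toByteArray();
    ExtensionRegistry extensions = ExtensionRegistry.getEmptyRegistry();
    Message prototype = Timestamp.getDefaultInstance();

    // Old shape: allocate a builder, merge the bytes, then build a message.
    Message viaBuilder =
        prototype.newBuilderForType().mergeFrom(payload, extensions).build();

    // New shape: ask the message's parser for the object directly.
    Message viaParser =
        prototype.getParserForType().parseFrom(payload, extensions);

    System.out.println(viaBuilder.equals(viaParser));  // true
  }
}
```

Both paths produce equal messages; the difference is what ends up inside the benchmark's timed loop.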

benchmarks/py_benchmark.py

@@ -4,6 +4,7 @@ import timeit
 import math
 import fnmatch
+# BEGIN CPP GENERATED MESSAGE
 # CPP generated code must be linked before importing the generated Python code
 # for the descriptor can be found in the pool
 if len(sys.argv) < 2:
@@ -14,13 +15,14 @@ if sys.argv[1] == "true":
   sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) + "/tmp" )
 elif sys.argv[1] != "false":
   raise IOError("Need string argument \"true\" or \"false\" for whether to use cpp generated code")
+# END CPP GENERATED MESSAGE
 import datasets.google_message1.benchmark_message1_proto2_pb2 as benchmark_message1_proto2_pb2
 import datasets.google_message1.benchmark_message1_proto3_pb2 as benchmark_message1_proto3_pb2
 import datasets.google_message2.benchmark_message2_pb2 as benchmark_message2_pb2
 import datasets.google_message3.benchmark_message3_pb2 as benchmark_message3_pb2
 import datasets.google_message4.benchmark_message4_pb2 as benchmark_message4_pb2
-import benchmarks_pb2
+import benchmarks_pb2 as benchmarks_pb2
 def run_one_test(filename):
