Merge branch 'bazel_gflags' into build_files

pull/9773/head
David Garcia Quintas 8 years ago
commit bfd2c647b4
Changed files (changed lines per file):
  WORKSPACE                                   | 18
  third_party/gflags                          |  2
  tools/run_tests/run_microbenchmark.py       |  6
  tools/run_tests/sanity/check_submodules.sh  |  2

WORKSPACE
@@ -33,26 +33,36 @@ bind(
     actual = "@submodule_gtest//:gtest",
 )
+bind(
+    name = "gflags",
+    actual = "@com_github_gflags_gflags//:gflags",
+)
 new_local_repository(
     name = "submodule_boringssl",
+    path = "third_party/boringssl-with-bazel",
     build_file = "third_party/boringssl-with-bazel/BUILD",
-    path = "third_party/boringssl-with-bazel",
 )
 new_local_repository(
     name = "submodule_zlib",
+    path = "third_party/zlib",
     build_file = "third_party/zlib.BUILD",
-    path = "third_party/zlib",
 )
 new_local_repository(
     name = "submodule_protobuf",
+    path = "third_party/protobuf",
     build_file = "third_party/protobuf/BUILD",
-    path = "third_party/protobuf",
 )
 new_local_repository(
     name = "submodule_gtest",
+    path = "third_party/googletest",
     build_file = "third_party/gtest.BUILD",
-    path = "third_party/googletest",
 )
+local_repository(
+    name = "com_github_gflags_gflags",
+    path = "third_party/gflags",
+)
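Note: the new bind() aliases the canonical label //external:gflags to the local_repository declared at the bottom of the file, so Bazel targets can depend on gflags without naming the external repository directly. The short Python sketch below is only an illustrative smoke check of that wiring, not part of this change; it assumes it is run from the repository root with bazel on the PATH.

# Illustrative smoke check (assumption: repo root, bazel on PATH): confirm
# that //external:gflags resolves to the gflags repository added above.
import subprocess

output = subprocess.check_output(
    ['bazel', 'query', '--output=build', '//external:gflags']).decode()

# The printed bind() rule should point at @com_github_gflags_gflags//:gflags.
assert 'com_github_gflags_gflags' in output, output
print(output)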

third_party/gflags
@@ -1 +1 @@
-Subproject commit f8a0efe03aa69b3336d8e228b37d4ccb17324b88
+Subproject commit 30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e

tools/run_tests/run_microbenchmark.py
@@ -93,7 +93,9 @@ def collect_latency(bm_name, args):
                                        '--benchmark_list_tests']).splitlines():
     link(line, '%s.txt' % fnize(line))
     benchmarks.append(
-        jobset.JobSpec(['bins/basicprof/%s' % bm_name, '--benchmark_filter=^%s$' % line],
+        jobset.JobSpec(['bins/basicprof/%s' % bm_name,
+                        '--benchmark_filter=^%s$' % line,
+                        '--benchmark_min_time=0.05'],
                        environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
     profile_analysis.append(
         jobset.JobSpec([sys.executable,
@@ -105,7 +107,7 @@ def collect_latency(bm_name, args):
     # consume upwards of five gigabytes of ram in some cases, and so analysing
     # hundreds of them at once is impractical -- but we want at least some
     # concurrency or the work takes too long
-    if len(benchmarks) >= min(4, multiprocessing.cpu_count()):
+    if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
       # run up to half the cpu count: each benchmark can use up to two cores
       # (one for the microbenchmark, one for the data flush)
       jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2),
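Note: after this change, collect_latency() enumerates the cases with --benchmark_list_tests, runs each one under LATENCY_TRACE with --benchmark_filter=^name$ and a lowered --benchmark_min_time=0.05 (down from the benchmark library's 0.5s default, keeping each trace short), and flushes the accumulated jobs in batches of at most min(16, cpu_count()) with roughly cpu_count()/2 workers. The sketch below reproduces that pattern with the standard library only; it is illustrative, the real script uses the repo's jobset helpers, and 'bins/basicprof/bm_fullstack' is just a placeholder benchmark binary.

# Standard-library sketch of the batching pattern above; illustrative only.
import multiprocessing
import os
import subprocess

BM = 'bins/basicprof/bm_fullstack'  # placeholder Google-benchmark binary


def run_one(name):
    # One benchmark per process; LATENCY_TRACE names the per-case trace file,
    # and --benchmark_min_time=0.05 keeps each case's measurement window short.
    env = dict(os.environ, LATENCY_TRACE='%s.trace' % name)
    subprocess.check_call(
        [BM, '--benchmark_filter=^%s$' % name, '--benchmark_min_time=0.05'],
        env=env)


def main():
    names = subprocess.check_output(
        [BM, '--benchmark_list_tests']).decode().splitlines()
    # The real script flushes the benchmark jobs together with their
    # profile-analysis jobs (the memory-heavy part), hence batches of at most
    # min(16, cpu_count()) and cpu_count()/2 workers: one core for the
    # microbenchmark, one for the data flush.
    pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() // 2))
    batch = []
    for name in names:
        batch.append(name)
        if len(batch) >= min(16, multiprocessing.cpu_count()):
            pool.map(run_one, batch)
            batch = []
    if batch:
        pool.map(run_one, batch)
    pool.close()
    pool.join()


if __name__ == '__main__':
    main()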

tools/run_tests/sanity/check_submodules.sh
@@ -44,7 +44,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules
 44c25c892a6229b20db7cd9dc05584ea865896de third_party/benchmark (v0.1.0-343-g44c25c8)
 78684e5b222645828ca302e56b40b9daff2b2d27 third_party/boringssl (78684e5)
 886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
-f8a0efe03aa69b3336d8e228b37d4ccb17324b88 third_party/gflags (v2.2.0)
+30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e third_party/gflags (v2.2.0)
 c99458533a9b4c743ed51537e25989ea55944908 third_party/googletest (release-1.7.0)
 a428e42072765993ff674fda72863c9f1aa2d268 third_party/protobuf (v3.1.0-alpha-1)
 bcad91771b7f0bff28a1cac1981d7ef2b9bcef3c third_party/thrift (bcad917)
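Note: the sanity script builds the expected list above and diffs it against the repository's actual `git submodule` output, so the pinned gflags hash has to be bumped together with the submodule itself. Below is a rough Python equivalent of that check, for illustration only; the real test uses awk/sort/diff, and the expected table is truncated here.

# Illustrative Python version of the shell sanity check above. Expected hashes
# are copied from the hunk; the remaining submodules are elided.
import subprocess

EXPECTED = {
    'third_party/gflags': '30dbc81fb5ffdc98ea9b14b1918bfe4e8779b26e',
    'third_party/googletest': 'c99458533a9b4c743ed51537e25989ea55944908',
    # ... other submodules elided for brevity
}


def current_submodules():
    # `git submodule` prints "<sha1> <path> (<describe>)" per submodule, with a
    # leading '-', '+' or 'U' marker when the checkout is out of sync.
    out = subprocess.check_output(['git', 'submodule']).decode()
    shas = {}
    for line in out.splitlines():
        sha, path = line.split()[:2]
        shas[path] = sha.lstrip('-+U')
    return shas


def main():
    actual = current_submodules()
    for path, want in EXPECTED.items():
        got = actual.get(path)
        if got != want:
            raise SystemExit('%s is at %s, expected %s' % (path, got, want))
    print('submodules OK')


if __name__ == '__main__':
    main()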
