From f7af2a9a0517ada19e0ce1a092ca064985f604e5 Mon Sep 17 00:00:00 2001
From: Craig Tiller <ctiller@google.com>
Date: Tue, 31 Jan 2017 15:08:31 -0800
Subject: [PATCH] Update latency profiler to use (more appropriate)
microbenchmarks
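
Instead of driving a qps_json_driver scenario under CONFIG=basicprof, the
latency profile is now generated from the bm_fullstack microbenchmarks: a new
tools/profiling/microbenchmark/bm.py script builds each named benchmark under
CONFIG=basicprof and CONFIG=mutrace, captures per-test latency reports and
perf-based flamegraphs, and links everything from reports/index.html.

A minimal sketch of the intended use (assuming, as bm.py does, a FlameGraph
checkout at ~/FlameGraph and permission to run perf via sudo):

    $ tools/profiling/microbenchmark/bm.py bm_fullstack
    $ ls reports/   # index.html plus per-test .txt reports and .svg flamegraphs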
---
src/core/lib/slice/slice_intern.c | 6 +-
test/cpp/microbenchmarks/bm_fullstack.cc | 4 +
.../latency_profile/run_latency_profile.sh | 61 +---------
tools/profiling/microbenchmark/bm.py | 109 ++++++++++++++++++
4 files changed, 119 insertions(+), 61 deletions(-)
create mode 100755 tools/profiling/microbenchmark/bm.py
diff --git a/src/core/lib/slice/slice_intern.c b/src/core/lib/slice/slice_intern.c
index 7cbd17bffd8..32adc4df97d 100644
--- a/src/core/lib/slice/slice_intern.c
+++ b/src/core/lib/slice/slice_intern.c
@@ -215,7 +215,9 @@ bool grpc_slice_is_interned(grpc_slice slice) {
}
grpc_slice grpc_slice_intern(grpc_slice slice) {
+ GPR_TIMER_BEGIN("grpc_slice_intern", 0);
if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
+ GPR_TIMER_END("grpc_slice_intern", 0);
return slice;
}
@@ -225,6 +227,7 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
+ GPR_TIMER_END("grpc_slice_intern", 0);
return grpc_static_slice_table[ent.idx];
}
}
@@ -247,7 +250,7 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
/* and treat this as if we were never here... sshhh */
} else {
gpr_mu_unlock(&shard->mu);
- GPR_TIMER_END("grpc_mdstr_from_buffer", 0);
+ GPR_TIMER_END("grpc_slice_intern", 0);
return materialize(s);
}
}
@@ -275,6 +278,7 @@ grpc_slice grpc_slice_intern(grpc_slice slice) {
gpr_mu_unlock(&shard->mu);
+ GPR_TIMER_END("grpc_slice_intern", 0);
return materialize(s);
}
diff --git a/test/cpp/microbenchmarks/bm_fullstack.cc b/test/cpp/microbenchmarks/bm_fullstack.cc
index c3e96c572c8..589b319eb70 100644
--- a/test/cpp/microbenchmarks/bm_fullstack.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack.cc
@@ -57,6 +57,7 @@ extern "C" {
#include "test/core/util/passthru_endpoint.h"
#include "test/core/util/port.h"
}
+#include "src/core/lib/profiling/timers.h"
#include "src/cpp/client/create_channel_internal.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "third_party/benchmark/include/benchmark/benchmark.h"
@@ -402,6 +403,7 @@ static void BM_UnaryPingPong(benchmark::State& state) {
std::unique_ptr<EchoTestService::Stub> stub(
EchoTestService::NewStub(fixture->channel()));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
recv_response.Clear();
ClientContext cli_ctx;
ClientContextMutator cli_ctx_mut(&cli_ctx);
@@ -470,6 +472,7 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
}
response_rw.Read(&recv_request, tag(0));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
request_rw->Write(send_request, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
@@ -527,6 +530,7 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
}
request_rw->Read(&recv_response, tag(0));
while (state.KeepRunning()) {
+ GPR_TIMER_SCOPE("BenchmarkCycle", 0);
response_rw.Write(send_response, tag(1));
while (true) {
GPR_ASSERT(fixture->cq()->Next(&t, &ok));
diff --git a/tools/profiling/latency_profile/run_latency_profile.sh b/tools/profiling/latency_profile/run_latency_profile.sh
index 618db202dc4..7ebe308e0a0 100755
--- a/tools/profiling/latency_profile/run_latency_profile.sh
+++ b/tools/profiling/latency_profile/run_latency_profile.sh
@@ -28,55 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# format argument via
-# $ echo '{...}' | python -mjson.tool
-read -r -d '' SCENARIOS_JSON_ARG <<'EOF'
-{
- "scenarios": [
- {
- "benchmark_seconds": 5,
- "client_config": {
- "client_channels": 1,
- "client_type": "SYNC_CLIENT",
- "histogram_params": {
- "max_possible": 60000000000.0,
- "resolution": 0.01
- },
- "load_params": {
- "closed_loop": {}
- },
- "outstanding_rpcs_per_channel": 1,
- "payload_config": {
- "simple_params": {
- "req_size": 0,
- "resp_size": 0
- }
- },
- "rpc_type": "UNARY",
- "security_params": {
- "server_host_override": "foo.test.google.fr",
- "use_test_ca": true
- }
- },
- "name": "cpp_protobuf_sync_unary_ping_pong_secure",
- "num_clients": 1,
- "num_servers": 1,
- "server_config": {
- "core_limit": 1,
- "security_params": {
- "server_host_override": "foo.test.google.fr",
- "use_test_ca": true
- },
- "server_type": "SYNC_SERVER"
- },
- "spawn_local_worker_count": 2,
- "warmup_seconds": 5
- }
- ]
-}
-
-EOF
-
set -ex
cd $(dirname $0)/../../..
@@ -93,14 +44,4 @@ else
PYTHON=python2.7
fi
-make CONFIG=basicprof -j$CPUS qps_json_driver
-
-mkdir -p reports
-bins/basicprof/qps_json_driver --scenarios_json="$SCENARIOS_JSON_ARG"
-
-echo '<html><head></head><body>Latency profile for:<br/>' > reports/index.html
-echo "<p><pre>${SCENARIOS_JSON_ARG}</pre></p>" >> reports/index.html
-echo '<p><pre>' >> reports/index.html
-$PYTHON tools/profiling/latency_profile/profile_analyzer.py \
- --source=latency_trace.txt --fmt=simple >> reports/index.html
-echo '</pre></p></body></html>' >> reports/index.html
+$PYTHON tools/profiling/microbenchmark/bm.py bm_fullstack
diff --git a/tools/profiling/microbenchmark/bm.py b/tools/profiling/microbenchmark/bm.py
new file mode 100755
index 00000000000..064bb7af1b4
--- /dev/null
+++ b/tools/profiling/microbenchmark/bm.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python2.7
+# Copyright 2017, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import multiprocessing
+import os
+import subprocess
+import sys
+
+flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
+
+def fnize(s):
+ out = ''
+ for c in s:
+ if c in '<>, /':
+ if len(out) and out[-1] == '_': continue
+ out += '_'
+ else:
+ out += c
+ return out
+
+os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+if not os.path.exists('reports'):
+ os.makedirs('reports')
+
+# index html
+index_html = """
+
+
+Microbenchmark Results
+
+
+"""
+
+def heading(name):
+ global index_html
+ index_html += "<h1>%s</h1>\n" % name
+
+def link(txt, tgt):
+ global index_html
+ index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)
+
+for bm_name in sys.argv[1:]:
+ # generate latency profiles
+ heading('Latency Profiles: %s' % bm_name)
+ subprocess.check_call(
+ ['make', bm_name,
+ 'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
+ for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
+ '--benchmark_list_tests']).splitlines():
+ link(line, 'reports/%s.txt' % fnize(line))
+ with open('reports/%s.txt' % fnize(line), 'w') as f:
+ f.write(subprocess.check_output(['bins/basicprof/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line]))
+ f.write('\n***********************************************************\n')
+ f.write(subprocess.check_output([
+ sys.executable, 'tools/profiling/latency_profile/profile_analyzer.py',
+ '--source', 'latency_trace.txt', '--fmt', 'simple']))
+
+ # generate flamegraphs
+ heading('Flamegraphs: %s' % bm_name)
+ subprocess.check_call(
+ ['make', bm_name,
+ 'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
+ for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
+ '--benchmark_list_tests']).splitlines():
+ subprocess.check_call(['sudo', 'perf', 'record', '-g', '-F', '99',
+ 'bins/mutrace/%s' % bm_name,
+ '--benchmark_filter=^%s$' % line,
+ '--benchmark_min_time=20'])
+ with open('/tmp/bm.perf', 'w') as f:
+ f.write(subprocess.check_output(['sudo', 'perf', 'script']))
+ with open('/tmp/bm.folded', 'w') as f:
+ f.write(subprocess.check_output([
+ '%s/stackcollapse-perf.pl' % flamegraph_dir, '/tmp/bm.perf']))
+ link(line, 'reports/%s.svg' % fnize(line))
+ with open('reports/%s.svg' % fnize(line), 'w') as f:
+ f.write(subprocess.check_output([
+ '%s/flamegraph.pl' % flamegraph_dir, '/tmp/bm.folded']))
+
+index_html += "</body>\n</html>\n"
+with open('reports/index.html', 'w') as f:
+ f.write(index_html)