mirror of https://github.com/grpc/grpc.git
[benchmarks] Remove stats integration (#30900)
* remove old stats cruft * remove * remove * fix * fix (pull/30909/head)
parent
b9dfcc092e
commit
1f1f923a72
16 changed files with 14 additions and 1203 deletions
@ -1,80 +0,0 @@ |
||||
#!/bin/bash
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Builds qps_json_driver and profiles one benchmark run under `perf record`,
# then opens the interactive `perf report`.
#
# format argument via
# $ echo '{...}' | python -mjson.tool
read -r -d '' SCENARIOS_JSON_ARG <<'EOF'
{
  "scenarios": [
    {
      "benchmark_seconds": 60,
      "warmup_seconds": 5,
      "client_config": {
        "client_channels": 100,
        "client_type": "ASYNC_CLIENT",
        "histogram_params": {
          "max_possible": 60000000000.0,
          "resolution": 0.01
        },
        "load_params": {
          "closed_loop": {}
        },
        "outstanding_rpcs_per_channel": 100,
        "payload_config": {
          "simple_params": {
            "req_size": 0,
            "resp_size": 0
          }
        },
        "rpc_type": "UNARY",
        "security_params": null
      },
      "name": "name_goes_here",
      "num_clients": 1,
      "num_servers": 1,
      "server_config": {
        "security_params": null,
        "server_type": "ASYNC_SERVER"
      },
      "spawn_local_worker_count": -2
    }
  ]
}
EOF

set -ex

# Run from the repository root (script lives three levels down).
cd "$(dirname "$0")/../../.."

# Use every available core for the build. print(...) works under both
# Python 2 and 3; the original `print multiprocessing.cpu_count()` was a
# Python-2-only statement and fails under python3.
CPUS=$(python -c 'import multiprocessing; print(multiprocessing.cpu_count())')

# try to use pypy for generating reports
# each trace dumps 7-8gig of text to disk, and processing this into a report is
# heavyweight - so any speed boost is worthwhile
# TODO(ctiller): consider rewriting report generation in C++ for performance
if which pypy >/dev/null; then
  PYTHON=pypy
else
  PYTHON=python2.7
fi

export config=mutrace

make CONFIG="$config" -j"$CPUS" qps_json_driver

# Sample at 997 Hz (prime) to avoid lockstep with 1000 Hz kernel timers.
sudo perf record -F 997 -g "bins/$config/qps_json_driver" --scenarios_json="$SCENARIOS_JSON_ARG"
sudo perf report
||||
|
@ -1,172 +0,0 @@ |
||||
# Copyright 2017 gRPC authors. |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
# Autogenerated by tools/codegen/core/gen_stats_data.py |
||||
|
||||
import massage_qps_stats_helpers |
||||
|
||||
|
||||
def massage_qps_stats(scenario_result):
    """Flatten each worker's nested "coreStats" blob into flat columns.

    Mutates every entry of scenario_result["serverStats"] and
    scenario_result["clientStats"] in place: the "coreStats" element is
    removed and replaced by "core_<name>" keys matching the bigquery
    schema. Entries without "coreStats" are left untouched.

    Args:
        scenario_result: dict with "serverStats" and "clientStats" lists of
            per-worker stats dicts (assumed shape — TODO confirm against the
            qps driver's JSON output).
    """
    # Counter metrics: copied through as integer "core_<name>" columns.
    counter_names = (
        "client_calls_created",
        "server_calls_created",
        "client_channels_created",
        "client_subchannels_created",
        "server_channels_created",
        "syscall_write",
        "syscall_read",
        "tcp_read_alloc_8k",
        "tcp_read_alloc_64k",
        "http2_settings_writes",
        "http2_pings_sent",
        "http2_writes_begun",
        "http2_transport_stalls",
        "http2_stream_stalls",
        "cq_pluck_creates",
        "cq_next_creates",
        "cq_callback_creates",
    )
    # Histogram metrics: expanded into comma-joined bucket counts,
    # bucket boundaries, and 50/95/99th percentile columns.
    histogram_names = (
        "call_initial_size",
        "tcp_write_size",
        "tcp_write_iov_size",
        "tcp_read_size",
        "tcp_read_offer",
        "tcp_read_offer_iov_size",
        "http2_send_message_size",
    )
    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
        if "coreStats" not in stats:
            continue
        # Get rid of the "coreStats" element and replace it by statistics
        # that correspond to columns in the bigquery schema.
        core_stats = stats.pop("coreStats")
        for name in counter_names:
            stats["core_" + name] = massage_qps_stats_helpers.counter(
                core_stats, name)
        for name in histogram_names:
            h = massage_qps_stats_helpers.histogram(core_stats, name)
            stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
            stats["core_" + name + "_bkts"] = ",".join(
                "%f" % x for x in h.boundaries)
            for pctl in (50, 95, 99):
                stats["core_%s_%dp" % (name, pctl)] = (
                    massage_qps_stats_helpers.percentile(
                        h.buckets, pctl, h.boundaries))
@ -1,62 +0,0 @@ |
||||
# Copyright 2017 gRPC authors. |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
|
||||
import collections |
||||
|
||||
|
||||
def _threshold_for_count_below(buckets, boundaries, count_below): |
||||
count_so_far = 0 |
||||
for lower_idx in range(0, len(buckets)): |
||||
count_so_far += buckets[lower_idx] |
||||
if count_so_far >= count_below: |
||||
break |
||||
if count_so_far == count_below: |
||||
# this bucket hits the threshold exactly... we should be midway through |
||||
# any run of zero values following the bucket |
||||
for upper_idx in range(lower_idx + 1, len(buckets)): |
||||
if buckets[upper_idx] != 0: |
||||
break |
||||
return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0 |
||||
else: |
||||
# treat values as uniform throughout the bucket, and find where this value |
||||
# should lie |
||||
lower_bound = boundaries[lower_idx] |
||||
upper_bound = boundaries[lower_idx + 1] |
||||
return (upper_bound - (upper_bound - lower_bound) * |
||||
(count_so_far - count_below) / float(buckets[lower_idx])) |
||||
|
||||
|
||||
def percentile(buckets, pctl, boundaries):
    """Return the pctl-th percentile value of the histogram."""
    target_count = sum(buckets) * pctl / 100.0
    return _threshold_for_count_below(buckets, boundaries, target_count)
||||
|
||||
|
||||
def counter(core_stats, name):
    """Return the named counter metric's count as an int.

    Returns None (implicitly) when no metric with that name exists; a
    matching metric with no 'count' field yields 0.
    """
    match = next(
        (m for m in core_stats['metrics'] if m['name'] == name), None)
    if match is not None:
        return int(match.get('count', 0))
||||
|
||||
|
||||
# Parsed histogram: per-bucket counts plus each bucket's start boundary.
Histogram = collections.namedtuple('Histogram', 'buckets boundaries')


def histogram(core_stats, name):
    """Return the named histogram metric as a Histogram namedtuple.

    Returns None (implicitly) when no metric with that name exists.
    Bucket counts and start boundaries are both coerced to int, with
    missing fields defaulting to 0.
    """
    for metric in core_stats['metrics']:
        if metric['name'] != name:
            continue
        raw = metric['histogram']['buckets']
        counts = [int(b.get('count', 0)) for b in raw]
        starts = [int(b.get('start', 0)) for b in raw]
        return Histogram(buckets=counts, boundaries=starts)
Loading…
Reference in new issue