[stats] Move core stats to C++ (#30936)

* begin c++

* Automated change: Fix sanity tests

* progress

* progress

* missing-files

* Automated change: Fix sanity tests

* moved-from-stats

* remove old benchmark cruft, get tests compiling

* iwyu

* Automated change: Fix sanity tests

* fix

* fix

* fixes

* fixes

* add needed constructor

* Automated change: Fix sanity tests

* iwyu

* fix

* fix?

* fix

* fix

* Remove ResetDefaultEventEngine

Now that it is a weak_ptr, there's no need to explicitly reset it. When
the tracked shared_ptr is deleted, the weak_ptr will fail to lock, and a
new default EventEngine will be created.

* forget existing engine with FactoryReset

* add visibility

* fix

Co-authored-by: ctiller <ctiller@users.noreply.github.com>
Co-authored-by: AJ Heller <hork@google.com>
pull/31301/head
Craig Tiller 2 years ago committed by GitHub
parent 90beb3f4c4
commit 20d1efc38a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 77
      BUILD
  2. 15
      CMakeLists.txt
  3. 2
      Makefile
  4. 10
      build_autogenerated.yaml
  5. 1
      config.m4
  6. 1
      config.w32
  7. 2
      gRPC-C++.podspec
  8. 3
      gRPC-Core.podspec
  9. 2
      grpc.gemspec
  10. 2
      grpc.gyp
  11. 2
      package.xml
  12. 3
      src/core/ext/filters/client_channel/subchannel.cc
  13. 3
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  14. 11
      src/core/ext/transport/chttp2/transport/writing.cc
  15. 69
      src/core/lib/debug/histogram_view.cc
  16. 39
      src/core/lib/debug/histogram_view.h
  17. 141
      src/core/lib/debug/stats.cc
  18. 64
      src/core/lib/debug/stats.h
  19. 297
      src/core/lib/debug/stats_data.cc
  20. 385
      src/core/lib/debug/stats_data.h
  21. 17
      src/core/lib/debug/stats_data_bq_schema.sql
  22. 1
      src/core/lib/iomgr/call_combiner.cc
  23. 1
      src/core/lib/iomgr/ev_epoll1_linux.cc
  24. 1
      src/core/lib/iomgr/ev_poll_posix.cc
  25. 1
      src/core/lib/iomgr/iocp_windows.cc
  26. 25
      src/core/lib/iomgr/tcp_posix.cc
  27. 9
      src/core/lib/surface/call.cc
  28. 5
      src/core/lib/surface/channel.cc
  29. 7
      src/core/lib/surface/completion_queue.cc
  30. 97
      src/cpp/util/core_stats.cc
  31. 32
      src/cpp/util/core_stats.h
  32. 38
      src/proto/grpc/core/BUILD
  33. 9
      src/proto/grpc/testing/BUILD
  34. 8
      src/proto/grpc/testing/stats.proto
  35. 1
      src/python/grpcio/grpc_core_dependencies.py
  36. 1
      src/python/grpcio_tests/tests/qps/BUILD.bazel
  37. 1
      src/python/grpcio_tests/tests_aio/benchmark/BUILD.bazel
  38. 135
      test/core/debug/stats_test.cc
  39. 25
      test/core/end2end/tests/simple_request.cc
  40. 2
      test/cpp/microbenchmarks/bm_alarm.cc
  41. 19
      test/cpp/microbenchmarks/bm_call_create.cc
  42. 31
      test/cpp/microbenchmarks/bm_chttp2_hpack.cc
  43. 6
      test/cpp/microbenchmarks/bm_chttp2_transport.cc
  44. 57
      test/cpp/microbenchmarks/bm_closure.cc
  45. 18
      test/cpp/microbenchmarks/bm_cq.cc
  46. 5
      test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
  47. 12
      test/cpp/microbenchmarks/bm_pollset.cc
  48. 1
      test/cpp/microbenchmarks/callback_streaming_ping_pong.h
  49. 1
      test/cpp/microbenchmarks/callback_unary_ping_pong.h
  50. 26
      test/cpp/microbenchmarks/fullstack_fixtures.h
  51. 3
      test/cpp/microbenchmarks/fullstack_streaming_ping_pong.h
  52. 2
      test/cpp/microbenchmarks/fullstack_streaming_pump.h
  53. 1
      test/cpp/microbenchmarks/fullstack_unary_ping_pong.h
  54. 37
      test/cpp/microbenchmarks/helpers.cc
  55. 14
      test/cpp/microbenchmarks/helpers.h
  56. 1
      test/cpp/naming/cancel_ares_query_test.cc
  57. 1
      test/cpp/qps/BUILD
  58. 7
      test/cpp/qps/client.h
  59. 31
      test/cpp/qps/report.cc
  60. 3
      test/cpp/qps/report.h
  61. 5
      test/cpp/qps/server.h
  62. 273
      tools/codegen/core/gen_stats_data.py
  63. 2
      tools/doxygen/Doxyfile.c++.internal
  64. 2
      tools/doxygen/Doxyfile.core.internal

77
BUILD

@ -3140,6 +3140,56 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "histogram_view",
srcs = [
"src/core/lib/debug/histogram_view.cc",
],
hdrs = [
"src/core/lib/debug/histogram_view.h",
],
deps = ["gpr"],
)
grpc_cc_library(
name = "stats_data",
srcs = [
"src/core/lib/debug/stats_data.cc",
],
hdrs = [
"src/core/lib/debug/stats_data.h",
],
external_deps = ["absl/strings"],
deps = [
"gpr_platform",
"histogram_view",
"per_cpu",
],
)
grpc_cc_library(
name = "stats",
srcs = [
"src/core/lib/debug/stats.cc",
],
hdrs = [
"src/core/lib/debug/stats.h",
],
external_deps = [
"absl/strings",
"absl/types:span",
],
visibility = [
"@grpc:alt_grpc_base_legacy",
],
deps = [
"gpr",
"histogram_view",
"no_destruct",
"stats_data",
],
)
grpc_cc_library(
name = "per_cpu",
hdrs = [
@ -3186,8 +3236,6 @@ grpc_cc_library(
"src/core/lib/compression/compression.cc",
"src/core/lib/compression/compression_internal.cc",
"src/core/lib/compression/message_compress.cc",
"src/core/lib/debug/stats.cc",
"src/core/lib/debug/stats_data.cc",
"src/core/lib/event_engine/channel_args_endpoint_config.cc",
"src/core/lib/iomgr/buffer_list.cc",
"src/core/lib/iomgr/call_combiner.cc",
@ -3296,8 +3344,6 @@ grpc_cc_library(
"src/core/lib/compression/compression_internal.h",
"src/core/lib/resource_quota/api.h",
"src/core/lib/compression/message_compress.h",
"src/core/lib/debug/stats.h",
"src/core/lib/debug/stats_data.h",
"src/core/lib/event_engine/channel_args_endpoint_config.h",
"src/core/lib/iomgr/block_annotate.h",
"src/core/lib/iomgr/buffer_list.h",
@ -3465,6 +3511,8 @@ grpc_cc_library(
"slice_buffer",
"slice_refcount",
"sockaddr_utils",
"stats",
"stats_data",
"status_helper",
"strerror",
"thread_quota",
@ -4004,6 +4052,8 @@ grpc_cc_library(
"slice_buffer",
"slice_refcount",
"sockaddr_utils",
"stats",
"stats_data",
"status_helper",
"subchannel_interface",
"time",
@ -7081,6 +7131,8 @@ grpc_cc_library(
"slice",
"slice_buffer",
"slice_refcount",
"stats",
"stats_data",
"status_helper",
"time",
"transport_fwd",
@ -7660,23 +7712,6 @@ grpc_cc_library(
],
)
grpc_cc_library(
name = "grpc++_core_stats",
srcs = [
"src/cpp/util/core_stats.cc",
],
hdrs = [
"src/cpp/util/core_stats.h",
],
language = "c++",
deps = [
"gpr",
"gpr_atm",
"grpc_base",
"//src/proto/grpc/core:stats_proto",
],
)
grpc_cc_library(
name = "grpc_opencensus_plugin",
srcs = [

15
CMakeLists.txt generated

@ -571,9 +571,6 @@ add_custom_target(tools
protobuf_generate_grpc_cpp_with_import_path_correction(
src/proto/grpc/channelz/channelz.proto src/proto/grpc/channelz/channelz.proto
)
protobuf_generate_grpc_cpp_with_import_path_correction(
src/proto/grpc/core/stats.proto src/proto/grpc/core/stats.proto
)
protobuf_generate_grpc_cpp_with_import_path_correction(
src/proto/grpc/health/v1/health.proto src/proto/grpc/health/v1/health.proto
)
@ -2112,6 +2109,7 @@ add_library(grpc
src/core/lib/compression/message_compress.cc
src/core/lib/config/core_configuration.cc
src/core/lib/debug/event_log.cc
src/core/lib/debug/histogram_view.cc
src/core/lib/debug/stats.cc
src/core/lib/debug/stats_data.cc
src/core/lib/debug/trace.cc
@ -2720,6 +2718,7 @@ add_library(grpc_unsecure
src/core/lib/compression/message_compress.cc
src/core/lib/config/core_configuration.cc
src/core/lib/debug/event_log.cc
src/core/lib/debug/histogram_view.cc
src/core/lib/debug/stats.cc
src/core/lib/debug/stats_data.cc
src/core/lib/debug/trace.cc
@ -14864,10 +14863,6 @@ endif()
if(gRPC_BUILD_TESTS)
add_executable(qps_json_driver
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.h
@ -14896,7 +14891,6 @@ add_executable(qps_json_driver
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.h
src/cpp/util/core_stats.cc
test/cpp/qps/benchmark_config.cc
test/cpp/qps/client_async.cc
test/cpp/qps/client_callback.cc
@ -14946,10 +14940,6 @@ endif()
if(gRPC_BUILD_TESTS)
add_executable(qps_worker
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/core/stats.grpc.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/benchmark_service.pb.h
@ -14974,7 +14964,6 @@ add_executable(qps_worker
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.cc
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.pb.h
${_gRPC_PROTO_GENS_DIR}/src/proto/grpc/testing/worker_service.grpc.pb.h
src/cpp/util/core_stats.cc
test/cpp/qps/client_async.cc
test/cpp/qps/client_callback.cc
test/cpp/qps/client_sync.cc

2
Makefile generated

@ -1384,6 +1384,7 @@ LIBGRPC_SRC = \
src/core/lib/compression/message_compress.cc \
src/core/lib/config/core_configuration.cc \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/histogram_view.cc \
src/core/lib/debug/stats.cc \
src/core/lib/debug/stats_data.cc \
src/core/lib/debug/trace.cc \
@ -1855,6 +1856,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/compression/message_compress.cc \
src/core/lib/config/core_configuration.cc \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/histogram_view.cc \
src/core/lib/debug/stats.cc \
src/core/lib/debug/stats_data.cc \
src/core/lib/debug/trace.cc \

@ -727,6 +727,7 @@ libs:
- src/core/lib/compression/message_compress.h
- src/core/lib/config/core_configuration.h
- src/core/lib/debug/event_log.h
- src/core/lib/debug/histogram_view.h
- src/core/lib/debug/stats.h
- src/core/lib/debug/stats_data.h
- src/core/lib/debug/trace.h
@ -1441,6 +1442,7 @@ libs:
- src/core/lib/compression/message_compress.cc
- src/core/lib/config/core_configuration.cc
- src/core/lib/debug/event_log.cc
- src/core/lib/debug/histogram_view.cc
- src/core/lib/debug/stats.cc
- src/core/lib/debug/stats_data.cc
- src/core/lib/debug/trace.cc
@ -1927,6 +1929,7 @@ libs:
- src/core/lib/compression/message_compress.h
- src/core/lib/config/core_configuration.h
- src/core/lib/debug/event_log.h
- src/core/lib/debug/histogram_view.h
- src/core/lib/debug/stats.h
- src/core/lib/debug/stats_data.h
- src/core/lib/debug/trace.h
@ -2282,6 +2285,7 @@ libs:
- src/core/lib/compression/message_compress.cc
- src/core/lib/config/core_configuration.cc
- src/core/lib/debug/event_log.cc
- src/core/lib/debug/histogram_view.cc
- src/core/lib/debug/stats.cc
- src/core/lib/debug/stats_data.cc
- src/core/lib/debug/trace.cc
@ -8392,7 +8396,6 @@ targets:
run: false
language: c++
headers:
- src/cpp/util/core_stats.h
- test/cpp/qps/benchmark_config.h
- test/cpp/qps/client.h
- test/cpp/qps/driver.h
@ -8406,7 +8409,6 @@ targets:
- test/cpp/qps/stats.h
- test/cpp/qps/usage_timer.h
src:
- src/proto/grpc/core/stats.proto
- src/proto/grpc/testing/benchmark_service.proto
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/messages.proto
@ -8414,7 +8416,6 @@ targets:
- src/proto/grpc/testing/report_qps_scenario_service.proto
- src/proto/grpc/testing/stats.proto
- src/proto/grpc/testing/worker_service.proto
- src/cpp/util/core_stats.cc
- test/cpp/qps/benchmark_config.cc
- test/cpp/qps/client_async.cc
- test/cpp/qps/client_callback.cc
@ -8437,7 +8438,6 @@ targets:
run: false
language: c++
headers:
- src/cpp/util/core_stats.h
- test/cpp/qps/client.h
- test/cpp/qps/histogram.h
- test/cpp/qps/interarrival.h
@ -8447,14 +8447,12 @@ targets:
- test/cpp/qps/stats.h
- test/cpp/qps/usage_timer.h
src:
- src/proto/grpc/core/stats.proto
- src/proto/grpc/testing/benchmark_service.proto
- src/proto/grpc/testing/control.proto
- src/proto/grpc/testing/messages.proto
- src/proto/grpc/testing/payloads.proto
- src/proto/grpc/testing/stats.proto
- src/proto/grpc/testing/worker_service.proto
- src/cpp/util/core_stats.cc
- test/cpp/qps/client_async.cc
- test/cpp/qps/client_callback.cc
- test/cpp/qps/client_sync.cc

1
config.m4 generated

@ -466,6 +466,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/compression/message_compress.cc \
src/core/lib/config/core_configuration.cc \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/histogram_view.cc \
src/core/lib/debug/stats.cc \
src/core/lib/debug/stats_data.cc \
src/core/lib/debug/trace.cc \

1
config.w32 generated

@ -432,6 +432,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\compression\\message_compress.cc " +
"src\\core\\lib\\config\\core_configuration.cc " +
"src\\core\\lib\\debug\\event_log.cc " +
"src\\core\\lib\\debug\\histogram_view.cc " +
"src\\core\\lib\\debug\\stats.cc " +
"src\\core\\lib\\debug\\stats_data.cc " +
"src\\core\\lib\\debug\\trace.cc " +

2
gRPC-C++.podspec generated

@ -678,6 +678,7 @@ Pod::Spec.new do |s|
'src/core/lib/compression/message_compress.h',
'src/core/lib/config/core_configuration.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/histogram_view.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/debug/trace.h',
@ -1542,6 +1543,7 @@ Pod::Spec.new do |s|
'src/core/lib/compression/message_compress.h',
'src/core/lib/config/core_configuration.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/histogram_view.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/debug/trace.h',

3
gRPC-Core.podspec generated

@ -1041,6 +1041,8 @@ Pod::Spec.new do |s|
'src/core/lib/config/core_configuration.h',
'src/core/lib/debug/event_log.cc',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/histogram_view.cc',
'src/core/lib/debug/histogram_view.h',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.cc',
@ -2170,6 +2172,7 @@ Pod::Spec.new do |s|
'src/core/lib/compression/message_compress.h',
'src/core/lib/config/core_configuration.h',
'src/core/lib/debug/event_log.h',
'src/core/lib/debug/histogram_view.h',
'src/core/lib/debug/stats.h',
'src/core/lib/debug/stats_data.h',
'src/core/lib/debug/trace.h',

2
grpc.gemspec generated

@ -952,6 +952,8 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/config/core_configuration.h )
s.files += %w( src/core/lib/debug/event_log.cc )
s.files += %w( src/core/lib/debug/event_log.h )
s.files += %w( src/core/lib/debug/histogram_view.cc )
s.files += %w( src/core/lib/debug/histogram_view.h )
s.files += %w( src/core/lib/debug/stats.cc )
s.files += %w( src/core/lib/debug/stats.h )
s.files += %w( src/core/lib/debug/stats_data.cc )

2
grpc.gyp generated

@ -798,6 +798,7 @@
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/event_log.cc',
'src/core/lib/debug/histogram_view.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
@ -1248,6 +1249,7 @@
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/event_log.cc',
'src/core/lib/debug/histogram_view.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',

2
package.xml generated

@ -934,6 +934,8 @@
<file baseinstalldir="/" name="src/core/lib/config/core_configuration.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/event_log.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/event_log.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/histogram_view.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/histogram_view.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/debug/stats_data.cc" role="src" />

@ -49,6 +49,7 @@
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/event_engine/default_event_engine.h"
#include "src/core/lib/gpr/alloc.h"
@ -635,7 +636,7 @@ Subchannel::Subchannel(SubchannelKey key,
// triggering segmentation faults. To prevent this issue, we call a grpc_init
// here and a grpc_shutdown in the subchannel destructor.
InitInternally();
GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
global_stats().IncrementClientSubchannelsCreated();
GRPC_CLOSURE_INIT(&on_connecting_finished_, OnConnectingFinished, this,
grpc_schedule_on_exec_ctx);
// Check proxy mapper to determine address to connect to and channel

@ -60,6 +60,7 @@
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/bitset.h"
@ -1282,7 +1283,7 @@ static void perform_stream_op_locked(void* stream_op,
if (op->send_message) {
t->num_messages_in_next_write++;
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(
grpc_core::global_stats().IncrementHttp2SendMessageSize(
op->payload->send_message.send_message->Length());
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_message_finished = add_closure_barrier(op->on_complete);

@ -48,6 +48,7 @@
#include "src/core/ext/transport/chttp2/transport/stream_map.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/ref_counted.h"
@ -169,7 +170,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
grpc_slice_buffer_add(&t->outbuf,
grpc_chttp2_ping_create(false, pq->inflight_id));
GRPC_STATS_INC_HTTP2_PINGS_SENT();
grpc_core::global_stats().IncrementHttp2PingsSent();
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_bdp_estimator_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
@ -263,7 +264,7 @@ namespace {
class WriteContext {
public:
explicit WriteContext(grpc_chttp2_transport* t) : t_(t) {
GRPC_STATS_INC_HTTP2_WRITES_BEGUN();
grpc_core::global_stats().IncrementHttp2WritesBegun();
}
void FlushSettings() {
@ -276,7 +277,7 @@ class WriteContext {
t_->force_send_settings = false;
t_->dirtied_local_settings = false;
t_->sent_local_settings = true;
GRPC_STATS_INC_HTTP2_SETTINGS_WRITES();
grpc_core::global_stats().IncrementHttp2SettingsWrites();
}
}
@ -502,11 +503,11 @@ class StreamWriteContext {
if (!data_send_context.AnyOutgoing()) {
if (t_->flow_control.remote_window() <= 0) {
GRPC_STATS_INC_HTTP2_TRANSPORT_STALLS();
grpc_core::global_stats().IncrementHttp2TransportStalls();
report_stall(t_, s_, "transport");
grpc_chttp2_list_add_stalled_by_transport(t_, s_);
} else if (data_send_context.stream_remote_window() <= 0) {
GRPC_STATS_INC_HTTP2_STREAM_STALLS();
grpc_core::global_stats().IncrementHttp2StreamStalls();
report_stall(t_, s_, "stream");
grpc_chttp2_list_add_stalled_by_stream(t_, s_);
}

@ -0,0 +1,69 @@
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <grpc/support/port_platform.h>
#include "src/core/lib/debug/histogram_view.h"
namespace grpc_core {
// Total number of samples recorded across every bucket of this view.
double HistogramView::Count() const {
  double total = 0.0;
  int idx = 0;
  while (idx < num_buckets) {
    total += buckets[idx];
    ++idx;
  }
  return total;
}
// Returns the value at which the running total of bucket counts first
// reaches count_below. When the threshold lands exactly on a bucket
// boundary, returns the midpoint of any following run of empty buckets;
// otherwise linearly interpolates within the bucket that crosses it.
double HistogramView::ThresholdForCountBelow(double count_below) const {
  double lower_bound;
  double upper_bound;
  int upper_idx;
  // find the lowest bucket that gets us above count_below
  double count_so_far = 0.0;
  int lower_idx = 0;
  for (; lower_idx < num_buckets; lower_idx++) {
    count_so_far += static_cast<double>(buckets[lower_idx]);
    if (count_so_far >= count_below) {
      break;
    }
  }
  if (count_so_far == count_below) {
    // this bucket hits the threshold exactly... we should be midway through
    // any run of zero values following the bucket
    for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
      if (buckets[upper_idx]) {
        break;
      }
    }
    return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
  } else {
    // treat values as uniform throughout the bucket, and find where this value
    // should lie
    lower_bound = bucket_boundaries[lower_idx];
    upper_bound = bucket_boundaries[lower_idx + 1];
    return upper_bound - (upper_bound - lower_bound) *
                             (count_so_far - count_below) /
                             static_cast<double>(buckets[lower_idx]);
  }
}
double HistogramView::Percentile(double p) const {
const double count = Count();
if (count == 0) return 0.0;
return ThresholdForCountBelow(count * p / 100.0);
}
} // namespace grpc_core

@ -1,4 +1,4 @@
// Copyright 2017 gRPC authors.
// Copyright 2021 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,27 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
#ifndef GRPC_CORE_LIB_DEBUG_HISTOGRAM_VIEW_H
#define GRPC_CORE_LIB_DEBUG_HISTOGRAM_VIEW_H
package grpc.core;
#include <grpc/support/port_platform.h>
message Bucket {
double start = 1;
uint64 count = 2;
}
#include <stdint.h>
message Histogram {
repeated Bucket buckets = 1;
}
namespace grpc_core {
message Metric {
string name = 1;
oneof value {
uint64 count = 10;
Histogram histogram = 11;
}
}
struct HistogramView {
int (*bucket_for)(int value);
const int* bucket_boundaries;
int num_buckets;
const uint64_t* buckets;
message Stats {
repeated Metric metrics = 1;
}
double Percentile(double p) const;
double Count() const;
double ThresholdForCountBelow(double count_below) const;
};
} // namespace grpc_core
#endif // GRPC_CORE_LIB_DEBUG_HISTOGRAM_VIEW_H

@ -20,146 +20,49 @@
#include "src/core/lib/debug/stats.h"
#include <inttypes.h>
#include <string.h>
#include <stddef.h>
#include <algorithm>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
namespace grpc_core {
Stats* const g_stats_data = [] {
size_t num_cores = gpr_cpu_num_cores();
Stats* stats = static_cast<Stats*>(
gpr_zalloc(sizeof(Stats) + num_cores * sizeof(grpc_stats_data)));
stats->num_cores = num_cores;
return stats;
}();
} // namespace grpc_core
// Aggregates the per-CPU counter and histogram-bucket shards into a single
// zeroed snapshot in *output, using relaxed atomic loads per slot.
void grpc_stats_collect(grpc_stats_data* output) {
  memset(output, 0, sizeof(*output));
  const size_t cores = grpc_core::g_stats_data->num_cores;
  for (size_t cpu = 0; cpu < cores; cpu++) {
    grpc_stats_data* shard = &grpc_core::g_stats_data->per_cpu[cpu];
    for (size_t c = 0; c < GRPC_STATS_COUNTER_COUNT; c++) {
      output->counters[c] += gpr_atm_no_barrier_load(&shard->counters[c]);
    }
    for (size_t b = 0; b < GRPC_STATS_HISTOGRAM_BUCKETS; b++) {
      output->histograms[b] += gpr_atm_no_barrier_load(&shard->histograms[b]);
    }
  }
}
// Computes c = b - a element-wise over all counters and histogram buckets.
void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,
                     grpc_stats_data* c) {
  size_t i = 0;
  while (i < GRPC_STATS_COUNTER_COUNT) {
    c->counters[i] = b->counters[i] - a->counters[i];
    ++i;
  }
  i = 0;
  while (i < GRPC_STATS_HISTOGRAM_BUCKETS) {
    c->histograms[i] = b->histograms[i] - a->histograms[i];
    ++i;
  }
}
void grpc_stats_inc_histogram_value(int histogram, int value) {
const int bucket = grpc_stats_get_bucket[histogram](value);
gpr_atm_no_barrier_fetch_add(
&GRPC_THREAD_STATS_DATA()
->histograms[grpc_stats_histo_start[histogram] + bucket],
1);
}
// Sums every bucket of one histogram within a collected stats snapshot.
size_t grpc_stats_histo_count(const grpc_stats_data* stats,
                              grpc_stats_histograms histogram) {
  const int first = grpc_stats_histo_start[histogram];
  const int nbuckets = grpc_stats_histo_buckets[histogram];
  size_t total = 0;
  for (int i = 0; i < nbuckets; i++) {
    total += static_cast<size_t>(stats->histograms[first + i]);
  }
  return total;
}
// Returns the value at which the cumulative bucket counts first reach
// count_below. If the threshold falls exactly on a bucket edge, returns the
// midpoint of any trailing run of empty buckets; otherwise interpolates
// linearly inside the crossing bucket.
static double threshold_for_count_below(const gpr_atm* bucket_counts,
                                        const int* bucket_boundaries,
                                        int num_buckets, double count_below) {
  double count_so_far;
  double lower_bound;
  double upper_bound;
  int lower_idx;
  int upper_idx;
  /* find the lowest bucket that gets us above count_below */
  count_so_far = 0.0;
  for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) {
    count_so_far += static_cast<double>(bucket_counts[lower_idx]);
    if (count_so_far >= count_below) {
      break;
    }
  }
  if (count_so_far == count_below) {
    /* this bucket hits the threshold exactly... we should be midway through
       any run of zero values following the bucket */
    for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
      if (bucket_counts[upper_idx]) {
        break;
      }
    }
    return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
  } else {
    /* treat values as uniform throughout the bucket, and find where this value
       should lie */
    lower_bound = bucket_boundaries[lower_idx];
    upper_bound = bucket_boundaries[lower_idx + 1];
    return upper_bound - (upper_bound - lower_bound) *
                             (count_so_far - count_below) /
                             static_cast<double>(bucket_counts[lower_idx]);
  }
}
// Estimates the requested percentile of one histogram in a collected stats
// snapshot; returns 0.0 when the histogram holds no samples.
double grpc_stats_histo_percentile(const grpc_stats_data* stats,
                                   grpc_stats_histograms histogram,
                                   double percentile) {
  const size_t total = grpc_stats_histo_count(stats, histogram);
  if (total == 0) return 0.0;
  const double target = static_cast<double>(total) * percentile / 100.0;
  return threshold_for_count_below(
      stats->histograms + grpc_stats_histo_start[histogram],
      grpc_stats_histo_bucket_boundaries[histogram],
      grpc_stats_histo_buckets[histogram], target);
}
namespace stats_detail {
namespace {
template <typename I>
std::string ArrayToJson(const I* values, size_t count) {
std::string ArrayToJson(absl::Span<const I> values) {
std::vector<std::string> parts;
for (size_t i = 0; i < count; i++) {
parts.push_back(absl::StrFormat("%d", values[i]));
for (auto value : values) {
parts.push_back(absl::StrCat(value));
}
return absl::StrCat("[", absl::StrJoin(parts, ","), "]");
}
} // namespace
std::string grpc_stats_data_as_json(const grpc_stats_data* data) {
std::string StatsAsJson(absl::Span<const uint64_t> counters,
absl::Span<const absl::string_view> counter_name,
absl::Span<const HistogramView> histograms,
absl::Span<const absl::string_view> histogram_name) {
std::vector<std::string> parts;
for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
parts.push_back(absl::StrFormat(
"\"%s\": %" PRIdPTR, grpc_stats_counter_name[i], data->counters[i]));
for (size_t i = 0; i < counters.size(); i++) {
parts.push_back(absl::StrCat("\"", counter_name[i], "\": ", counters[i]));
}
for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
parts.push_back(absl::StrFormat(
"\"%s\": %s", grpc_stats_histogram_name[i],
ArrayToJson(data->histograms + grpc_stats_histo_start[i],
grpc_stats_histo_buckets[i])));
for (size_t i = 0; i < histograms.size(); i++) {
parts.push_back(
absl::StrFormat("\"%s_bkt\": %s", grpc_stats_histogram_name[i],
ArrayToJson(grpc_stats_histo_bucket_boundaries[i],
grpc_stats_histo_buckets[i])));
absl::StrCat("\"", histogram_name[i], "\": ",
ArrayToJson(absl::Span<const uint64_t>(
histograms[i].buckets, histograms[i].num_buckets))));
parts.push_back(absl::StrCat(
"\"", histogram_name[i], "_bkt\": ",
ArrayToJson(absl::Span<const int>(histograms[i].bucket_boundaries,
histograms[i].num_buckets))));
}
return absl::StrCat("{", absl::StrJoin(parts, ", "), "}");
}
} // namespace stats_detail
} // namespace grpc_core

@ -21,50 +21,44 @@
#include <grpc/support/port_platform.h>
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <vector>
#include <grpc/support/atm.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "src/core/lib/debug/stats_data.h" // IWYU pragma: export
#include "src/core/lib/iomgr/exec_ctx.h"
typedef struct grpc_stats_data {
gpr_atm counters[GRPC_STATS_COUNTER_COUNT];
gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS];
} grpc_stats_data;
#include "src/core/lib/debug/histogram_view.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gprpp/no_destruct.h"
namespace grpc_core {
struct Stats {
size_t num_cores;
grpc_stats_data per_cpu[0];
};
extern Stats* const g_stats_data;
} // namespace grpc_core
#define GRPC_THREAD_STATS_DATA() \
(&::grpc_core::g_stats_data \
->per_cpu[grpc_core::ExecCtx::Get()->starting_cpu()])
inline GlobalStatsCollector& global_stats() {
return *NoDestructSingleton<GlobalStatsCollector>::Get();
}
#define GRPC_STATS_INC_COUNTER(ctr) \
(gpr_atm_no_barrier_fetch_add(&GRPC_THREAD_STATS_DATA()->counters[(ctr)], 1))
namespace stats_detail {
std::string StatsAsJson(absl::Span<const uint64_t> counters,
absl::Span<const absl::string_view> counter_name,
absl::Span<const HistogramView> histograms,
absl::Span<const absl::string_view> histogram_name);
}
#define GRPC_STATS_INC_HISTOGRAM(histogram, index) \
(gpr_atm_no_barrier_fetch_add( \
&GRPC_THREAD_STATS_DATA()->histograms[histogram##_FIRST_SLOT + (index)], \
1))
// Serializes a stats object of generated type T (which exposes Counter/
// Histogram enums with a COUNT member, a counters array, counter_name/
// histogram_name tables, and a histogram() accessor) to a JSON string by
// collecting every histogram view and delegating to the non-template helper.
template <typename T>
std::string StatsAsJson(T* data) {
  std::vector<HistogramView> histograms;
  for (int i = 0; i < static_cast<int>(T::Histogram::COUNT); i++) {
    histograms.push_back(
        data->histogram(static_cast<typename T::Histogram>(i)));
  }
  return stats_detail::StatsAsJson(
      absl::Span<const uint64_t>(data->counters,
                                 static_cast<int>(T::Counter::COUNT)),
      T::counter_name, histograms, T::histogram_name);
}
void grpc_stats_collect(grpc_stats_data* output);
// c = b-a
void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,
grpc_stats_data* c);
std::string grpc_stats_data_as_json(const grpc_stats_data* data);
double grpc_stats_histo_percentile(const grpc_stats_data* stats,
grpc_stats_histograms histogram,
double percentile);
size_t grpc_stats_histo_count(const grpc_stats_data* stats,
grpc_stats_histograms histogram);
void grpc_stats_inc_histogram_value(int histogram, int value);
} // namespace grpc_core
#endif // GRPC_CORE_LIB_DEBUG_STATS_H

@ -24,34 +24,75 @@
#include <stdint.h>
#include "src/core/lib/debug/stats.h"
namespace grpc_core {
namespace {
union DblUint {
double dbl;
uint64_t uint;
};
} // namespace
const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"client_calls_created",
"server_calls_created",
"client_channels_created",
"client_subchannels_created",
"server_channels_created",
"syscall_write",
"syscall_read",
"tcp_read_alloc_8k",
"tcp_read_alloc_64k",
"http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
"http2_transport_stalls",
"http2_stream_stalls",
"cq_pluck_creates",
"cq_next_creates",
"cq_callback_creates",
// Folds this collector's atomic bucket counts into *result.
void HistogramCollector_32768_24::Collect(Histogram_32768_24* result) const {
  for (size_t i = 0; i != 24; ++i) {
    result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);
  }
}
// Bucket-wise difference of two histogram snapshots (left - right).
Histogram_32768_24 operator-(const Histogram_32768_24& left,
                             const Histogram_32768_24& right) {
  Histogram_32768_24 diff;
  for (size_t i = 0; i != 24; ++i) {
    diff.buckets_[i] = left.buckets_[i] - right.buckets_[i];
  }
  return diff;
}
// Folds this collector's atomic bucket counts into *result.
void HistogramCollector_16777216_20::Collect(
    Histogram_16777216_20* result) const {
  for (size_t i = 0; i != 20; ++i) {
    result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);
  }
}
// Bucket-wise difference of two histogram snapshots (left - right).
Histogram_16777216_20 operator-(const Histogram_16777216_20& left,
                                const Histogram_16777216_20& right) {
  Histogram_16777216_20 diff;
  for (size_t i = 0; i != 20; ++i) {
    diff.buckets_[i] = left.buckets_[i] - right.buckets_[i];
  }
  return diff;
}
// Accumulates this collector's per-bucket atomic counts into *result.
// Relaxed loads suffice: callers want an eventually-consistent snapshot,
// not a linearizable one, and the hot increment path must stay cheap.
void HistogramCollector_80_10::Collect(Histogram_80_10* result) const {
  for (size_t idx = 0; idx < 10; ++idx) {
    result->buckets_[idx] += buckets_[idx].load(std::memory_order_relaxed);
  }
}
// Element-wise difference of two histogram snapshots:
// out.buckets[i] = left.buckets[i] - right.buckets[i].
Histogram_80_10 operator-(const Histogram_80_10& left,
                          const Histogram_80_10& right) {
  Histogram_80_10 out;
  size_t i = 0;
  for (uint64_t& bucket : out.buckets_) {
    bucket = left.buckets_[i] - right.buckets_[i];
    ++i;
  }
  return out;
}
// Snake_case display names for each counter, indexed by
// static_cast<int>(Counter::...). Order must stay in sync with the
// Counter enum in stats_data.h (this file is generated from stats.yaml).
const absl::string_view
    GlobalStats::counter_name[static_cast<int>(Counter::COUNT)] = {
        "client_calls_created",
        "server_calls_created",
        "client_channels_created",
        "client_subchannels_created",
        "server_channels_created",
        "syscall_write",
        "syscall_read",
        "tcp_read_alloc_8k",
        "tcp_read_alloc_64k",
        "http2_settings_writes",
        "http2_pings_sent",
        "http2_writes_begun",
        "http2_transport_stalls",
        "http2_stream_stalls",
        "cq_pluck_creates",
        "cq_next_creates",
        "cq_callback_creates",
};
const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
const absl::string_view GlobalStats::counter_doc[static_cast<int>(
Counter::COUNT)] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
"Number of client channels created",
@ -76,37 +117,40 @@ const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of completion queues created for cq_callback (indicates callback "
"api usage)",
};
const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"call_initial_size", "tcp_write_size", "tcp_write_iov_size",
"tcp_read_size", "tcp_read_offer", "tcp_read_offer_iov_size",
"http2_send_message_size",
// Snake_case display names for each histogram, indexed by
// static_cast<int>(Histogram::...). Order must stay in sync with the
// Histogram enum in stats_data.h.
const absl::string_view
    GlobalStats::histogram_name[static_cast<int>(Histogram::COUNT)] = {
        "call_initial_size", "tcp_write_size", "tcp_write_iov_size",
        "tcp_read_size", "tcp_read_offer", "tcp_read_offer_iov_size",
        "http2_send_message_size",
};
const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Initial size of the grpc_call arena created at call start",
"Number of bytes offered to each syscall_write",
"Number of byte segments offered to each syscall_write",
"Number of bytes received by each syscall_read",
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
// One-line documentation string per histogram, indexed by
// static_cast<int>(Histogram::...); parallels histogram_name above.
const absl::string_view
    GlobalStats::histogram_doc[static_cast<int>(Histogram::COUNT)] = {
        "Initial size of the grpc_call arena created at call start",
        "Number of bytes offered to each syscall_write",
        "Number of byte segments offered to each syscall_write",
        "Number of bytes received by each syscall_read",
        "Number of bytes offered to each syscall_read",
        "Number of byte segments offered to each syscall_read",
        "Size of messages received by HTTP2 transport",
};
const int grpc_stats_table_0[25] = {
namespace {
const int kStatsTable0[25] = {
0, 1, 2, 4, 7, 11, 17, 26, 40, 61, 93, 142, 216,
329, 500, 760, 1155, 1755, 2667, 4052, 6155, 9350, 14203, 21574, 32768};
const uint8_t grpc_stats_table_1[27] = {3, 3, 4, 5, 6, 6, 7, 8, 9,
10, 11, 11, 12, 13, 14, 15, 16, 16,
17, 18, 19, 20, 20, 21, 22, 23, 24};
const int grpc_stats_table_2[21] = {
const uint8_t kStatsTable1[27] = {3, 3, 4, 5, 6, 6, 7, 8, 9,
10, 11, 11, 12, 13, 14, 15, 16, 16,
17, 18, 19, 20, 20, 21, 22, 23, 24};
const int kStatsTable2[21] = {
0, 1, 3, 8, 19, 45, 106,
250, 588, 1383, 3252, 7646, 17976, 42262,
99359, 233593, 549177, 1291113, 3035402, 7136218, 16777216};
const uint8_t grpc_stats_table_3[23] = {2, 3, 3, 4, 5, 6, 7, 8,
8, 9, 10, 11, 12, 12, 13, 14,
15, 16, 16, 17, 18, 19, 20};
const int grpc_stats_table_4[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80};
const uint8_t grpc_stats_table_5[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9};
namespace grpc_core {
int BucketForHistogramValue_32768_24(int value) {
const uint8_t kStatsTable3[23] = {2, 3, 3, 4, 5, 6, 7, 8,
8, 9, 10, 11, 12, 12, 13, 14,
15, 16, 16, 17, 18, 19, 20};
const int kStatsTable4[11] = {0, 1, 2, 4, 7, 11, 17, 26, 38, 56, 80};
const uint8_t kStatsTable5[9] = {3, 3, 4, 5, 6, 6, 7, 8, 9};
} // namespace
int Histogram_32768_24::BucketFor(int value) {
if (value < 3) {
if (value < 0) {
return 0;
@ -115,19 +159,17 @@ int BucketForHistogramValue_32768_24(int value) {
}
} else {
if (value < 24577) {
// first_nontrivial_code=4613937818241073152
// last_code=4672484613396889600 [24576.000000]
DblUint val;
val.dbl = value;
const int bucket =
grpc_stats_table_1[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < grpc_stats_table_0[bucket]);
kStatsTable1[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable0[bucket]);
} else {
return 23;
}
}
}
int BucketForHistogramValue_16777216_20(int value) {
int Histogram_16777216_20::BucketFor(int value) {
if (value < 2) {
if (value < 0) {
return 0;
@ -136,19 +178,17 @@ int BucketForHistogramValue_16777216_20(int value) {
}
} else {
if (value < 8388609) {
// first_nontrivial_code=4611686018427387904
// last_code=4710765210229538816 [8388608.000000]
DblUint val;
val.dbl = value;
const int bucket =
grpc_stats_table_3[((val.uint - 4611686018427387904ull) >> 52)];
return bucket - (value < grpc_stats_table_2[bucket]);
kStatsTable3[((val.uint - 4611686018427387904ull) >> 52)];
return bucket - (value < kStatsTable2[bucket]);
} else {
return 19;
}
}
}
int BucketForHistogramValue_80_10(int value) {
int Histogram_80_10::BucketFor(int value) {
if (value < 3) {
if (value < 0) {
return 0;
@ -157,13 +197,11 @@ int BucketForHistogramValue_80_10(int value) {
}
} else {
if (value < 49) {
// first_nontrivial_code=4613937818241073152
// last_code=4631952216750555136 [48.000000]
DblUint val;
val.dbl = value;
const int bucket =
grpc_stats_table_5[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < grpc_stats_table_4[bucket]);
kStatsTable5[((val.uint - 4613937818241073152ull) >> 51)];
return bucket - (value < kStatsTable4[bucket]);
} else {
if (value < 56) {
return 8;
@ -173,18 +211,131 @@ int BucketForHistogramValue_80_10(int value) {
}
}
}
// Zeroes every counter field. The histogram members need no explicit work
// here: their in-class initializers (`uint64_t buckets_[N]{}`) already
// zero-initialize them.
GlobalStats::GlobalStats() {
  client_calls_created = 0;
  server_calls_created = 0;
  client_channels_created = 0;
  client_subchannels_created = 0;
  server_channels_created = 0;
  syscall_write = 0;
  syscall_read = 0;
  tcp_read_alloc_8k = 0;
  tcp_read_alloc_64k = 0;
  http2_settings_writes = 0;
  http2_pings_sent = 0;
  http2_writes_begun = 0;
  http2_transport_stalls = 0;
  http2_stream_stalls = 0;
  cq_pluck_creates = 0;
  cq_next_creates = 0;
  cq_callback_creates = 0;
}
// Returns a HistogramView describing `which`: the bucketing function, the
// table of bucket lower bounds, the bucket count, and a pointer to this
// snapshot's accumulated bucket values.
HistogramView GlobalStats::histogram(Histogram which) const {
  switch (which) {
    default:
      // Histogram::COUNT (or a corrupted enum value) is not a real histogram.
      GPR_UNREACHABLE_CODE(return HistogramView());
    case Histogram::kCallInitialSize:
      return HistogramView{&Histogram_32768_24::BucketFor, kStatsTable0, 24,
                           call_initial_size.buckets()};
    case Histogram::kTcpWriteSize:
      return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable2, 20,
                           tcp_write_size.buckets()};
    case Histogram::kTcpWriteIovSize:
      return HistogramView{&Histogram_80_10::BucketFor, kStatsTable4, 10,
                           tcp_write_iov_size.buckets()};
    case Histogram::kTcpReadSize:
      return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable2, 20,
                           tcp_read_size.buckets()};
    case Histogram::kTcpReadOffer:
      return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable2, 20,
                           tcp_read_offer.buckets()};
    case Histogram::kTcpReadOfferIovSize:
      return HistogramView{&Histogram_80_10::BucketFor, kStatsTable4, 10,
                           tcp_read_offer_iov_size.buckets()};
    case Histogram::kHttp2SendMessageSize:
      return HistogramView{&Histogram_16777216_20::BucketFor, kStatsTable2, 20,
                           http2_send_message_size.buckets()};
  }
}
std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {
auto result = std::make_unique<GlobalStats>();
for (const auto& data : data_) {
result->client_calls_created +=
data.client_calls_created.load(std::memory_order_relaxed);
result->server_calls_created +=
data.server_calls_created.load(std::memory_order_relaxed);
result->client_channels_created +=
data.client_channels_created.load(std::memory_order_relaxed);
result->client_subchannels_created +=
data.client_subchannels_created.load(std::memory_order_relaxed);
result->server_channels_created +=
data.server_channels_created.load(std::memory_order_relaxed);
result->syscall_write += data.syscall_write.load(std::memory_order_relaxed);
result->syscall_read += data.syscall_read.load(std::memory_order_relaxed);
result->tcp_read_alloc_8k +=
data.tcp_read_alloc_8k.load(std::memory_order_relaxed);
result->tcp_read_alloc_64k +=
data.tcp_read_alloc_64k.load(std::memory_order_relaxed);
result->http2_settings_writes +=
data.http2_settings_writes.load(std::memory_order_relaxed);
result->http2_pings_sent +=
data.http2_pings_sent.load(std::memory_order_relaxed);
result->http2_writes_begun +=
data.http2_writes_begun.load(std::memory_order_relaxed);
result->http2_transport_stalls +=
data.http2_transport_stalls.load(std::memory_order_relaxed);
result->http2_stream_stalls +=
data.http2_stream_stalls.load(std::memory_order_relaxed);
result->cq_pluck_creates +=
data.cq_pluck_creates.load(std::memory_order_relaxed);
result->cq_next_creates +=
data.cq_next_creates.load(std::memory_order_relaxed);
result->cq_callback_creates +=
data.cq_callback_creates.load(std::memory_order_relaxed);
data.call_initial_size.Collect(&result->call_initial_size);
data.tcp_write_size.Collect(&result->tcp_write_size);
data.tcp_write_iov_size.Collect(&result->tcp_write_iov_size);
data.tcp_read_size.Collect(&result->tcp_read_size);
data.tcp_read_offer.Collect(&result->tcp_read_offer);
data.tcp_read_offer_iov_size.Collect(&result->tcp_read_offer_iov_size);
data.http2_send_message_size.Collect(&result->http2_send_message_size);
}
return result;
}
std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {
auto result = std::make_unique<GlobalStats>();
result->client_calls_created =
client_calls_created - other.client_calls_created;
result->server_calls_created =
server_calls_created - other.server_calls_created;
result->client_channels_created =
client_channels_created - other.client_channels_created;
result->client_subchannels_created =
client_subchannels_created - other.client_subchannels_created;
result->server_channels_created =
server_channels_created - other.server_channels_created;
result->syscall_write = syscall_write - other.syscall_write;
result->syscall_read = syscall_read - other.syscall_read;
result->tcp_read_alloc_8k = tcp_read_alloc_8k - other.tcp_read_alloc_8k;
result->tcp_read_alloc_64k = tcp_read_alloc_64k - other.tcp_read_alloc_64k;
result->http2_settings_writes =
http2_settings_writes - other.http2_settings_writes;
result->http2_pings_sent = http2_pings_sent - other.http2_pings_sent;
result->http2_writes_begun = http2_writes_begun - other.http2_writes_begun;
result->http2_transport_stalls =
http2_transport_stalls - other.http2_transport_stalls;
result->http2_stream_stalls = http2_stream_stalls - other.http2_stream_stalls;
result->cq_pluck_creates = cq_pluck_creates - other.cq_pluck_creates;
result->cq_next_creates = cq_next_creates - other.cq_next_creates;
result->cq_callback_creates = cq_callback_creates - other.cq_callback_creates;
result->call_initial_size = call_initial_size - other.call_initial_size;
result->tcp_write_size = tcp_write_size - other.tcp_write_size;
result->tcp_write_iov_size = tcp_write_iov_size - other.tcp_write_iov_size;
result->tcp_read_size = tcp_read_size - other.tcp_read_size;
result->tcp_read_offer = tcp_read_offer - other.tcp_read_offer;
result->tcp_read_offer_iov_size =
tcp_read_offer_iov_size - other.tcp_read_offer_iov_size;
result->http2_send_message_size =
http2_send_message_size - other.http2_send_message_size;
return result;
}
} // namespace grpc_core
// Legacy lookup tables, one entry per histogram in grpc_stats_histograms
// order: bucket count, first slot in the flat bucket array, bucket lower
// bounds, and the bucketing function for each histogram.
const int grpc_stats_histo_buckets[7] = {24, 20, 10, 20, 20, 10, 20};
const int grpc_stats_histo_start[7] = {0, 24, 44, 54, 74, 94, 104};
const int* const grpc_stats_histo_bucket_boundaries[7] = {
    grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
    grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_4,
    grpc_stats_table_2};
int (*const grpc_stats_get_bucket[7])(int value) = {
    grpc_core::BucketForHistogramValue_32768_24,
    grpc_core::BucketForHistogramValue_16777216_20,
    grpc_core::BucketForHistogramValue_80_10,
    grpc_core::BucketForHistogramValue_16777216_20,
    grpc_core::BucketForHistogramValue_16777216_20,
    grpc_core::BucketForHistogramValue_80_10,
    grpc_core::BucketForHistogramValue_16777216_20};

@ -23,129 +23,270 @@
#include <grpc/support/port_platform.h>
// IWYU pragma: private, include "src/core/lib/debug/stats.h"
// Legacy C-style identifiers for every global counter; COUNT is the total.
typedef enum {
  GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
  GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
  GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
  GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
  GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
  GRPC_STATS_COUNTER_SYSCALL_WRITE,
  GRPC_STATS_COUNTER_SYSCALL_READ,
  GRPC_STATS_COUNTER_TCP_READ_ALLOC_8K,
  GRPC_STATS_COUNTER_TCP_READ_ALLOC_64K,
  GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
  GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
  GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
  GRPC_STATS_COUNTER_HTTP2_TRANSPORT_STALLS,
  GRPC_STATS_COUNTER_HTTP2_STREAM_STALLS,
  GRPC_STATS_COUNTER_CQ_PLUCK_CREATES,
  GRPC_STATS_COUNTER_CQ_NEXT_CREATES,
  GRPC_STATS_COUNTER_CQ_CALLBACK_CREATES,
  GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
// Per-counter display names and documentation strings, indexed by
// grpc_stats_counters.
extern const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
extern const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
// Legacy C-style identifiers for every histogram; COUNT is the total.
typedef enum {
  GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
  GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
// Per-histogram display names and documentation strings, indexed by
// grpc_stats_histograms.
extern const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
extern const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
// Packed layout of all histograms into one flat bucket array: each
// histogram's first slot offset and its bucket count; BUCKETS is the total.
typedef enum {
  GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
  GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 24,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 24,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 20,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 44,
  GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 10,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 54,
  GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 20,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 74,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 20,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 94,
  GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 10,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 104,
  GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 20,
  GRPC_STATS_HISTOGRAM_BUCKETS = 124
} grpc_stats_histogram_constants;
// Legacy convenience macros: each one bumps a single named counter via the
// generic GRPC_STATS_INC_COUNTER machinery.
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
#define GRPC_STATS_INC_SERVER_CALLS_CREATED() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
#define GRPC_STATS_INC_SYSCALL_WRITE() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_READ)
#define GRPC_STATS_INC_TCP_READ_ALLOC_8K() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_READ_ALLOC_8K)
#define GRPC_STATS_INC_TCP_READ_ALLOC_64K() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_READ_ALLOC_64K)
#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
#define GRPC_STATS_INC_HTTP2_TRANSPORT_STALLS() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_TRANSPORT_STALLS)
#define GRPC_STATS_INC_HTTP2_STREAM_STALLS() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_STREAM_STALLS)
#define GRPC_STATS_INC_CQ_PLUCK_CREATES() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_PLUCK_CREATES)
#define GRPC_STATS_INC_CQ_NEXT_CREATES() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_NEXT_CREATES)
#define GRPC_STATS_INC_CQ_CALLBACK_CREATES() \
  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_CALLBACK_CREATES)
// Legacy convenience macros: each one buckets `value` with the appropriate
// BucketForHistogramValue_* function and records it into a named histogram.
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                     \
      GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,   \
      grpc_core::BucketForHistogramValue_32768_24(static_cast<int>(value)))
#define GRPC_STATS_INC_TCP_WRITE_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                  \
      GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,   \
      grpc_core::BucketForHistogramValue_16777216_20(static_cast<int>(value)))
#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                      \
      GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,   \
      grpc_core::BucketForHistogramValue_80_10(static_cast<int>(value)))
#define GRPC_STATS_INC_TCP_READ_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                 \
      GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,   \
      grpc_core::BucketForHistogramValue_16777216_20(static_cast<int>(value)))
#define GRPC_STATS_INC_TCP_READ_OFFER(value) \
  GRPC_STATS_INC_HISTOGRAM(                  \
      GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,   \
      grpc_core::BucketForHistogramValue_16777216_20(static_cast<int>(value)))
#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                           \
      GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,   \
      grpc_core::BucketForHistogramValue_80_10(static_cast<int>(value)))
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value) \
  GRPC_STATS_INC_HISTOGRAM(                           \
      GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,   \
      grpc_core::BucketForHistogramValue_16777216_20(static_cast<int>(value)))
#include <stdint.h>
#include <atomic>
#include <memory>
#include "absl/strings/string_view.h"
#include "src/core/lib/debug/histogram_view.h"
#include "src/core/lib/gprpp/per_cpu.h"
namespace grpc_core {
int BucketForHistogramValue_32768_24(int value);
int BucketForHistogramValue_16777216_20(int value);
int BucketForHistogramValue_80_10(int value);
// Forward declaration so the snapshot class can befriend its collector.
class HistogramCollector_32768_24;
// Immutable snapshot of a 24-bucket histogram (values up to 32768).
// Produced by HistogramCollector_32768_24::Collect() or by subtracting two
// snapshots with operator-.
class Histogram_32768_24 {
 public:
  // Returns the bucket index (0..23) that `value` falls into.
  static int BucketFor(int value);
  // Raw per-bucket counts; 24 entries.
  const uint64_t* buckets() const { return buckets_; }
  // Element-wise difference of two snapshots (for interval stats).
  friend Histogram_32768_24 operator-(const Histogram_32768_24& left,
                                      const Histogram_32768_24& right);

 private:
  friend class HistogramCollector_32768_24;
  uint64_t buckets_[24]{};
};
// Mutable, thread-safe accumulator backing Histogram_32768_24: increments
// use relaxed atomics so the hot path never synchronizes.
class HistogramCollector_32768_24 {
 public:
  // Records one observation of `value` in its bucket.
  void Increment(int value) {
    buckets_[Histogram_32768_24::BucketFor(value)].fetch_add(
        1, std::memory_order_relaxed);
  }
  // Adds this collector's counts into *result.
  void Collect(Histogram_32768_24* result) const;

 private:
  std::atomic<uint64_t> buckets_[24]{};
};
// Forward declaration so the snapshot class can befriend its collector.
class HistogramCollector_16777216_20;
// Immutable snapshot of a 20-bucket histogram (values up to 16777216).
// Produced by HistogramCollector_16777216_20::Collect() or by subtracting
// two snapshots with operator-.
class Histogram_16777216_20 {
 public:
  // Returns the bucket index (0..19) that `value` falls into.
  static int BucketFor(int value);
  // Raw per-bucket counts; 20 entries.
  const uint64_t* buckets() const { return buckets_; }
  // Element-wise difference of two snapshots (for interval stats).
  friend Histogram_16777216_20 operator-(const Histogram_16777216_20& left,
                                         const Histogram_16777216_20& right);

 private:
  friend class HistogramCollector_16777216_20;
  uint64_t buckets_[20]{};
};
// Mutable, thread-safe accumulator backing Histogram_16777216_20:
// increments use relaxed atomics so the hot path never synchronizes.
class HistogramCollector_16777216_20 {
 public:
  // Records one observation of `value` in its bucket.
  void Increment(int value) {
    buckets_[Histogram_16777216_20::BucketFor(value)].fetch_add(
        1, std::memory_order_relaxed);
  }
  // Adds this collector's counts into *result.
  void Collect(Histogram_16777216_20* result) const;

 private:
  std::atomic<uint64_t> buckets_[20]{};
};
// Forward declaration so the snapshot class can befriend its collector.
class HistogramCollector_80_10;
// Immutable snapshot of a 10-bucket histogram (values up to 80).
// Produced by HistogramCollector_80_10::Collect() or by subtracting two
// snapshots with operator-.
class Histogram_80_10 {
 public:
  // Returns the bucket index (0..9) that `value` falls into.
  static int BucketFor(int value);
  // Raw per-bucket counts; 10 entries.
  const uint64_t* buckets() const { return buckets_; }
  // Element-wise difference of two snapshots (for interval stats).
  friend Histogram_80_10 operator-(const Histogram_80_10& left,
                                   const Histogram_80_10& right);

 private:
  friend class HistogramCollector_80_10;
  uint64_t buckets_[10]{};
};
// Mutable, thread-safe accumulator backing Histogram_80_10: increments use
// relaxed atomics so the hot path never synchronizes.
class HistogramCollector_80_10 {
 public:
  // Records one observation of `value` in its bucket.
  void Increment(int value) {
    buckets_[Histogram_80_10::BucketFor(value)].fetch_add(
        1, std::memory_order_relaxed);
  }
  // Adds this collector's counts into *result.
  void Collect(Histogram_80_10* result) const;

 private:
  std::atomic<uint64_t> buckets_[10]{};
};
// Plain (non-atomic) snapshot of every global counter and histogram.
// Instances come from GlobalStatsCollector::Collect() — a sum over all
// per-cpu shards — or from Diff(), the element-wise delta of two snapshots.
struct GlobalStats {
  // One enumerator per global counter; COUNT is the number of counters.
  enum class Counter {
    kClientCallsCreated,
    kServerCallsCreated,
    kClientChannelsCreated,
    kClientSubchannelsCreated,
    kServerChannelsCreated,
    kSyscallWrite,
    kSyscallRead,
    kTcpReadAlloc8k,
    kTcpReadAlloc64k,
    kHttp2SettingsWrites,
    kHttp2PingsSent,
    kHttp2WritesBegun,
    kHttp2TransportStalls,
    kHttp2StreamStalls,
    kCqPluckCreates,
    kCqNextCreates,
    kCqCallbackCreates,
    COUNT
  };
  // One enumerator per histogram; COUNT is the number of histograms.
  enum class Histogram {
    kCallInitialSize,
    kTcpWriteSize,
    kTcpWriteIovSize,
    kTcpReadSize,
    kTcpReadOffer,
    kTcpReadOfferIovSize,
    kHttp2SendMessageSize,
    COUNT
  };
  // Zeroes all counters (histograms zero themselves via member init).
  GlobalStats();
  // Display names and documentation strings, indexed by the enums above.
  static const absl::string_view counter_name[static_cast<int>(Counter::COUNT)];
  static const absl::string_view
      histogram_name[static_cast<int>(Histogram::COUNT)];
  static const absl::string_view counter_doc[static_cast<int>(Counter::COUNT)];
  static const absl::string_view
      histogram_doc[static_cast<int>(Histogram::COUNT)];
  // Counter values, addressable either by name or uniformly via the
  // `counters` array — both union members alias the same storage, and the
  // field order matches the Counter enum order.
  union {
    struct {
      uint64_t client_calls_created;
      uint64_t server_calls_created;
      uint64_t client_channels_created;
      uint64_t client_subchannels_created;
      uint64_t server_channels_created;
      uint64_t syscall_write;
      uint64_t syscall_read;
      uint64_t tcp_read_alloc_8k;
      uint64_t tcp_read_alloc_64k;
      uint64_t http2_settings_writes;
      uint64_t http2_pings_sent;
      uint64_t http2_writes_begun;
      uint64_t http2_transport_stalls;
      uint64_t http2_stream_stalls;
      uint64_t cq_pluck_creates;
      uint64_t cq_next_creates;
      uint64_t cq_callback_creates;
    };
    uint64_t counters[static_cast<int>(Counter::COUNT)];
  };
  // Histogram snapshots, one member per Histogram enumerator.
  Histogram_32768_24 call_initial_size;
  Histogram_16777216_20 tcp_write_size;
  Histogram_80_10 tcp_write_iov_size;
  Histogram_16777216_20 tcp_read_size;
  Histogram_16777216_20 tcp_read_offer;
  Histogram_80_10 tcp_read_offer_iov_size;
  Histogram_16777216_20 http2_send_message_size;
  // Returns a read-only view (bucketer, bounds, counts) for `which`.
  HistogramView histogram(Histogram which) const;
  // Returns `*this - other`, element-wise, as a new snapshot.
  std::unique_ptr<GlobalStats> Diff(const GlobalStats& other) const;
};
// Process-wide mutable stats store. Writers call the Increment* methods,
// which touch only the calling cpu's shard with relaxed atomics (cheap, no
// cross-core contention); readers call Collect() to sum the shards into a
// GlobalStats snapshot.
class GlobalStatsCollector {
 public:
  // Sums every per-cpu shard into a fresh snapshot.
  std::unique_ptr<GlobalStats> Collect() const;
  void IncrementClientCallsCreated() {
    data_.this_cpu().client_calls_created.fetch_add(1,
                                                    std::memory_order_relaxed);
  }
  void IncrementServerCallsCreated() {
    data_.this_cpu().server_calls_created.fetch_add(1,
                                                    std::memory_order_relaxed);
  }
  void IncrementClientChannelsCreated() {
    data_.this_cpu().client_channels_created.fetch_add(
        1, std::memory_order_relaxed);
  }
  void IncrementClientSubchannelsCreated() {
    data_.this_cpu().client_subchannels_created.fetch_add(
        1, std::memory_order_relaxed);
  }
  void IncrementServerChannelsCreated() {
    data_.this_cpu().server_channels_created.fetch_add(
        1, std::memory_order_relaxed);
  }
  void IncrementSyscallWrite() {
    data_.this_cpu().syscall_write.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementSyscallRead() {
    data_.this_cpu().syscall_read.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementTcpReadAlloc8k() {
    data_.this_cpu().tcp_read_alloc_8k.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementTcpReadAlloc64k() {
    data_.this_cpu().tcp_read_alloc_64k.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementHttp2SettingsWrites() {
    data_.this_cpu().http2_settings_writes.fetch_add(1,
                                                     std::memory_order_relaxed);
  }
  void IncrementHttp2PingsSent() {
    data_.this_cpu().http2_pings_sent.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementHttp2WritesBegun() {
    data_.this_cpu().http2_writes_begun.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementHttp2TransportStalls() {
    data_.this_cpu().http2_transport_stalls.fetch_add(
        1, std::memory_order_relaxed);
  }
  void IncrementHttp2StreamStalls() {
    data_.this_cpu().http2_stream_stalls.fetch_add(1,
                                                   std::memory_order_relaxed);
  }
  void IncrementCqPluckCreates() {
    data_.this_cpu().cq_pluck_creates.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementCqNextCreates() {
    data_.this_cpu().cq_next_creates.fetch_add(1, std::memory_order_relaxed);
  }
  void IncrementCqCallbackCreates() {
    data_.this_cpu().cq_callback_creates.fetch_add(1,
                                                   std::memory_order_relaxed);
  }
  // Histogram recorders: bucket `value` and bump that bucket's count.
  void IncrementCallInitialSize(int value) {
    data_.this_cpu().call_initial_size.Increment(value);
  }
  void IncrementTcpWriteSize(int value) {
    data_.this_cpu().tcp_write_size.Increment(value);
  }
  void IncrementTcpWriteIovSize(int value) {
    data_.this_cpu().tcp_write_iov_size.Increment(value);
  }
  void IncrementTcpReadSize(int value) {
    data_.this_cpu().tcp_read_size.Increment(value);
  }
  void IncrementTcpReadOffer(int value) {
    data_.this_cpu().tcp_read_offer.Increment(value);
  }
  void IncrementTcpReadOfferIovSize(int value) {
    data_.this_cpu().tcp_read_offer_iov_size.Increment(value);
  }
  void IncrementHttp2SendMessageSize(int value) {
    data_.this_cpu().http2_send_message_size.Increment(value);
  }

 private:
  // One shard's worth of counters and histogram collectors; all atomic so
  // threads sharing a cpu slot can update concurrently.
  struct Data {
    std::atomic<uint64_t> client_calls_created{0};
    std::atomic<uint64_t> server_calls_created{0};
    std::atomic<uint64_t> client_channels_created{0};
    std::atomic<uint64_t> client_subchannels_created{0};
    std::atomic<uint64_t> server_channels_created{0};
    std::atomic<uint64_t> syscall_write{0};
    std::atomic<uint64_t> syscall_read{0};
    std::atomic<uint64_t> tcp_read_alloc_8k{0};
    std::atomic<uint64_t> tcp_read_alloc_64k{0};
    std::atomic<uint64_t> http2_settings_writes{0};
    std::atomic<uint64_t> http2_pings_sent{0};
    std::atomic<uint64_t> http2_writes_begun{0};
    std::atomic<uint64_t> http2_transport_stalls{0};
    std::atomic<uint64_t> http2_stream_stalls{0};
    std::atomic<uint64_t> cq_pluck_creates{0};
    std::atomic<uint64_t> cq_next_creates{0};
    std::atomic<uint64_t> cq_callback_creates{0};
    HistogramCollector_32768_24 call_initial_size;
    HistogramCollector_16777216_20 tcp_write_size;
    HistogramCollector_80_10 tcp_write_iov_size;
    HistogramCollector_16777216_20 tcp_read_size;
    HistogramCollector_16777216_20 tcp_read_offer;
    HistogramCollector_80_10 tcp_read_offer_iov_size;
    HistogramCollector_16777216_20 http2_send_message_size;
  };
  // Per-cpu sharding keeps concurrent writers off each other's cache lines.
  PerCpu<Data> data_;
};
} // namespace grpc_core
extern const int grpc_stats_histo_buckets[7];
extern const int grpc_stats_histo_start[7];
extern const int* const grpc_stats_histo_bucket_boundaries[7];
extern int (*const grpc_stats_get_bucket[7])(int value);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */

@ -1,17 +0,0 @@
client_calls_created_per_iteration:FLOAT,
server_calls_created_per_iteration:FLOAT,
client_channels_created_per_iteration:FLOAT,
client_subchannels_created_per_iteration:FLOAT,
server_channels_created_per_iteration:FLOAT,
syscall_write_per_iteration:FLOAT,
syscall_read_per_iteration:FLOAT,
tcp_read_alloc_8k_per_iteration:FLOAT,
tcp_read_alloc_64k_per_iteration:FLOAT,
http2_settings_writes_per_iteration:FLOAT,
http2_pings_sent_per_iteration:FLOAT,
http2_writes_begun_per_iteration:FLOAT,
http2_transport_stalls_per_iteration:FLOAT,
http2_stream_stalls_per_iteration:FLOAT,
cq_pluck_creates_per_iteration:FLOAT,
cq_next_creates_per_iteration:FLOAT,
cq_callback_creates_per_iteration:FLOAT

@ -25,6 +25,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
namespace grpc_core {

@ -47,6 +47,7 @@
#include <grpc/support/cpu.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"

@ -40,6 +40,7 @@
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/block_annotate.h"

@ -31,6 +31,7 @@
#include <grpc/support/log_windows.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/iocp_windows.h"
#include "src/core/lib/iomgr/iomgr_internal.h"

@ -50,6 +50,7 @@
#include "src/core/lib/address_utils/sockaddr_utils.h"
#include "src/core/lib/debug/event_log.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/string.h"
@ -917,11 +918,13 @@ static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
}
msg.msg_flags = 0;
GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
grpc_core::global_stats().IncrementTcpReadOffer(
tcp->incoming_buffer->length);
grpc_core::global_stats().IncrementTcpReadOfferIovSize(
tcp->incoming_buffer->count);
do {
GRPC_STATS_INC_SYSCALL_READ();
grpc_core::global_stats().IncrementSyscallRead();
read_bytes = recvmsg(tcp->fd, &msg, 0);
} while (read_bytes < 0 && errno == EINTR);
@ -960,7 +963,7 @@ static bool tcp_do_read(grpc_tcp* tcp, grpc_error_handle* error)
return true;
}
GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
grpc_core::global_stats().IncrementTcpReadSize(read_bytes);
add_to_estimate(tcp, static_cast<size_t>(read_bytes));
GPR_DEBUG_ASSERT((size_t)read_bytes <=
tcp->incoming_buffer->length - total_read_bytes);
@ -1070,14 +1073,14 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
extra_wanted -= kBigAlloc;
grpc_slice_buffer_add_indexed(tcp->incoming_buffer,
tcp->memory_owner.MakeSlice(kBigAlloc));
GRPC_STATS_INC_TCP_READ_ALLOC_64K();
grpc_core::global_stats().IncrementTcpReadAlloc64k();
}
} else {
while (extra_wanted > 0) {
extra_wanted -= kSmallAlloc;
grpc_slice_buffer_add_indexed(
tcp->incoming_buffer, tcp->memory_owner.MakeSlice(kSmallAlloc));
GRPC_STATS_INC_TCP_READ_ALLOC_8K();
grpc_core::global_stats().IncrementTcpReadAlloc8k();
}
}
maybe_post_reclaimer(tcp);
@ -1187,7 +1190,7 @@ ssize_t tcp_send(int fd, const struct msghdr* msg, int* saved_errno,
ssize_t sent_length;
do {
/* TODO(klempner): Cork if this is a partial write */
GRPC_STATS_INC_SYSCALL_WRITE();
grpc_core::global_stats().IncrementSyscallWrite();
sent_length = sendmsg(fd, msg, SENDMSG_FLAGS | additional_flags);
} while (sent_length < 0 && (*saved_errno = errno) == EINTR);
return sent_length;
@ -1618,8 +1621,8 @@ static bool do_tcp_flush_zerocopy(grpc_tcp* tcp, TcpZerocopySendRecord* record,
if (!tried_sending_message) {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
sent_length = tcp_send(tcp->fd, &msg, &saved_errno, MSG_ZEROCOPY);
}
if (tcp->tcp_zerocopy_send_ctx.UpdateZeroCopyOMemStateAfterSend(
@ -1731,8 +1734,8 @@ static bool tcp_flush(grpc_tcp* tcp, grpc_error_handle* error) {
msg.msg_control = nullptr;
msg.msg_controllen = 0;
GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
grpc_core::global_stats().IncrementTcpWriteSize(sending_length);
grpc_core::global_stats().IncrementTcpWriteIovSize(iov_size);
sent_length = tcp_send(tcp->fd, &msg, &saved_errno);
}

@ -64,6 +64,7 @@
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/compression/compression_internal.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/experiments/experiments.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/time_precise.h"
@ -637,7 +638,7 @@ grpc_error_handle FilterStackCall::Create(grpc_call_create_args* args,
grpc_error_handle error;
grpc_channel_stack* channel_stack = channel->channel_stack();
size_t initial_size = channel->CallSizeEstimate();
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
global_stats().IncrementCallInitialSize(initial_size);
size_t call_alloc_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(FilterStackCall)) +
channel_stack->call_stack_size;
@ -654,7 +655,7 @@ grpc_error_handle FilterStackCall::Create(grpc_call_create_args* args,
call->final_op_.client.status_details = nullptr;
call->final_op_.client.status = nullptr;
call->final_op_.client.error_string = nullptr;
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
global_stats().IncrementClientCallsCreated();
path = CSliceRef(args->path->c_slice());
call->send_initial_metadata_.Set(HttpPathMetadata(),
std::move(*args->path));
@ -663,7 +664,7 @@ grpc_error_handle FilterStackCall::Create(grpc_call_create_args* args,
std::move(*args->authority));
}
} else {
GRPC_STATS_INC_SERVER_CALLS_CREATED();
global_stats().IncrementServerCallsCreated();
call->final_op_.server.cancelled = nullptr;
call->final_op_.server.core_server = args->server;
}
@ -2368,7 +2369,7 @@ class ClientPromiseBasedCall final : public PromiseBasedCall {
public:
ClientPromiseBasedCall(Arena* arena, grpc_call_create_args* args)
: PromiseBasedCall(arena, *args) {
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
global_stats().IncrementClientCallsCreated();
ScopedContext context(this);
send_initial_metadata_ =
GetContext<FragmentAllocator>()->MakeClientMetadata();

@ -43,6 +43,7 @@
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
@ -109,9 +110,9 @@ absl::StatusOr<RefCountedPtr<Channel>> Channel::CreateWithBuilder(
ChannelStackBuilder* builder) {
auto channel_args = builder->channel_args();
if (builder->channel_stack_type() == GRPC_SERVER_CHANNEL) {
GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
global_stats().IncrementServerChannelsCreated();
} else {
GRPC_STATS_INC_CLIENT_CHANNELS_CREATED();
global_stats().IncrementClientChannelsCreated();
}
absl::StatusOr<RefCountedPtr<grpc_channel_stack>> r = builder->Build();
if (!r.ok()) {

@ -41,6 +41,7 @@
#include <grpc/support/sync.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gprpp/atomic_utils.h"
#include "src/core/lib/gprpp/debug_location.h"
@ -513,13 +514,13 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
switch (completion_type) {
case GRPC_CQ_NEXT:
GRPC_STATS_INC_CQ_NEXT_CREATES();
grpc_core::global_stats().IncrementCqNextCreates();
break;
case GRPC_CQ_PLUCK:
GRPC_STATS_INC_CQ_PLUCK_CREATES();
grpc_core::global_stats().IncrementCqPluckCreates();
break;
case GRPC_CQ_CALLBACK:
GRPC_STATS_INC_CQ_CALLBACK_CREATES();
grpc_core::global_stats().IncrementCqCallbackCreates();
break;
}

@ -1,97 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "src/cpp/util/core_stats.h"
#include <string.h>
#include <string>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
// IWYU pragma: no_include <google/protobuf/repeated_ptr_field.h>
using grpc::core::Bucket;
using grpc::core::Histogram;
using grpc::core::Metric;
using grpc::core::Stats;
namespace grpc {
void CoreStatsToProto(const grpc_stats_data& core, Stats* proto) {
for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
Metric* m = proto->add_metrics();
m->set_name(grpc_stats_counter_name[i]);
m->set_count(core.counters[i]);
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
Metric* m = proto->add_metrics();
m->set_name(grpc_stats_histogram_name[i]);
Histogram* h = m->mutable_histogram();
for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
Bucket* b = h->add_buckets();
b->set_start(grpc_stats_histo_bucket_boundaries[i][j]);
b->set_count(core.histograms[grpc_stats_histo_start[i] + j]);
}
}
}
// Reconstructs a grpc_stats_data from its grpc.core.Stats proto form.
//
// `core` is zeroed first, so metrics absent from the proto read as 0.
// Metrics are matched back to core counters/histograms by name; metrics
// whose name is unknown to this binary are silently ignored. A histogram
// whose bucket count or bucket boundaries do not match the shape compiled
// into this binary is reported via gpr_log and its values are dropped.
void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core) {
  memset(core, 0, sizeof(*core));
  for (const auto& m : proto.metrics()) {
    switch (m.value_case()) {
      case Metric::VALUE_NOT_SET:
        break;
      case Metric::kCount:
        for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
          if (m.name() == grpc_stats_counter_name[i]) {
            core->counters[i] = m.count();
            break;
          }
        }
        break;
      case Metric::kHistogram:
        for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
          if (m.name() == grpc_stats_histogram_name[i]) {
            const auto& h = m.histogram();
            // Validate that the proto's bucket layout matches the shape
            // compiled into this binary before copying any values.
            bool valid = true;
            if (grpc_stats_histo_buckets[i] != h.buckets_size()) valid = false;
            for (int j = 0; valid && j < h.buckets_size(); j++) {
              if (grpc_stats_histo_bucket_boundaries[i][j] !=
                  h.buckets(j).start()) {
                valid = false;
              }
            }
            if (!valid) {
              gpr_log(GPR_ERROR,
                      "Found histogram %s but shape is different from proto",
                      m.name().c_str());
            }
            for (int j = 0; valid && j < h.buckets_size(); j++) {
              core->histograms[grpc_stats_histo_start[i] + j] =
                  h.buckets(j).count();
            }
            // Histogram names are unique: stop scanning once matched
            // (mirrors the break in the counter case above).
            break;
          }
        }
        break;
    }
  }
}
} // namespace grpc

@ -1,32 +0,0 @@
/*
*
* Copyright 2016 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H
#define GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H
#include "src/core/lib/debug/stats.h"
#include "src/proto/grpc/core/stats.pb.h"
namespace grpc {
// Serializes a core stats snapshot into the grpc.core.Stats proto.
void CoreStatsToProto(const grpc_stats_data& core, grpc::core::Stats* proto);
// Rebuilds a grpc_stats_data (zeroing it first) from its proto form;
// metrics are matched by name.
void ProtoToCoreStats(const grpc::core::Stats& proto, grpc_stats_data* core);
} // namespace grpc
#endif // GRPC_INTERNAL_CPP_UTIL_CORE_STATS_H

@ -1,38 +0,0 @@
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("//bazel:grpc_build_system.bzl", "grpc_package", "grpc_proto_library")
load("//bazel:python_rules.bzl", "py_proto_library")
licenses(["notice"])
# Package for the core stats proto; publicly visible so test/benchmark
# targets elsewhere in the tree can depend on it.
grpc_package(
name = "core",
visibility = "public",
)
# C++ proto library for stats.proto (gRPC build-system wrapper).
grpc_proto_library(
name = "stats_proto",
srcs = ["stats.proto"],
)
# Plain descriptor target; serves as the dep for the Python binding below.
proto_library(
name = "stats_descriptor",
srcs = ["stats.proto"],
)
# Python bindings generated from the descriptor target above.
py_proto_library(
name = "stats_py_pb2",
deps = [":stats_descriptor"],
)

@ -172,9 +172,6 @@ grpc_proto_library(
name = "stats_proto",
srcs = ["stats.proto"],
has_services = False,
deps = [
"//src/proto/grpc/core:stats_proto",
],
)
grpc_proto_library(
@ -254,12 +251,6 @@ py_grpc_library(
deps = [":worker_service_py_pb2"],
)
proto_library(
name = "stats_descriptor",
srcs = ["stats.proto"],
deps = ["//src/proto/grpc/core:stats_descriptor"],
)
py_proto_library(
name = "stats_py_pb2",
deps = [":stats_descriptor"],

@ -16,8 +16,6 @@ syntax = "proto3";
package grpc.testing;
import "src/proto/grpc/core/stats.proto";
message ServerStats {
// wall clock time change in seconds since last reset
double time_elapsed = 1;
@ -37,9 +35,6 @@ message ServerStats {
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
// Core library stats
grpc.core.Stats core_stats = 7;
}
// Histogram params based on grpc/support/histogram.c
@ -77,7 +72,4 @@ message ClientStats {
// Number of polls called inside completion queue
uint64 cq_poll_count = 6;
// Core library stats
grpc.core.Stats core_stats = 7;
}

@ -441,6 +441,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/event_log.cc',
'src/core/lib/debug/histogram_view.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',

@ -64,7 +64,6 @@ py_library(
":benchmark_server",
":client_runner",
":histogram",
"//src/proto/grpc/core:stats_py_pb2",
"//src/proto/grpc/testing:benchmark_service_py_pb2_grpc",
"//src/proto/grpc/testing:control_py_pb2",
"//src/proto/grpc/testing:payloads_py_pb2",

@ -51,7 +51,6 @@ py_library(
deps = [
":benchmark_client",
":benchmark_servicer",
"//src/proto/grpc/core:stats_py_pb2",
"//src/proto/grpc/testing:benchmark_service_py_pb2_grpc",
"//src/proto/grpc/testing:control_py_pb2",
"//src/proto/grpc/testing:payloads_py_pb2",

@ -19,150 +19,73 @@
#include "src/core/lib/debug/stats.h"
#include <algorithm>
#include <map>
#include <memory>
#include <queue>
#include <random>
#include <thread>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include <grpc/grpc.h>
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/util/test_config.h"
namespace grpc {
namespace grpc_core {
namespace testing {
class Snapshot {
public:
Snapshot() { grpc_stats_collect(&begin_); }
grpc_stats_data delta() {
grpc_stats_data now;
grpc_stats_collect(&now);
grpc_stats_data delta;
grpc_stats_diff(&now, &begin_, &delta);
return delta;
std::unique_ptr<GlobalStats> delta() {
auto now = global_stats().Collect();
return now->Diff(*begin_);
}
private:
grpc_stats_data begin_;
std::unique_ptr<GlobalStats> begin_ = global_stats().Collect();
};
TEST(StatsTest, IncCounters) {
for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
std::unique_ptr<Snapshot> snapshot(new Snapshot);
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_COUNTER((grpc_stats_counters)i);
EXPECT_EQ(snapshot->delta().counters[i], 1);
}
}
TEST(StatsTest, IncSpecificCounter) {
std::unique_ptr<Snapshot> snapshot(new Snapshot);
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
ExecCtx exec_ctx;
global_stats().IncrementClientCallsCreated();
EXPECT_EQ(snapshot->delta().counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED],
1);
EXPECT_EQ(snapshot->delta()->client_calls_created, 1);
}
static int FindExpectedBucket(int i, int j) {
if (j < 0) {
static int FindExpectedBucket(const HistogramView& h, int value) {
if (value < 0) {
return 0;
}
if (j >= grpc_stats_histo_bucket_boundaries[i][grpc_stats_histo_buckets[i]]) {
return grpc_stats_histo_buckets[i] - 1;
if (value >= h.bucket_boundaries[h.num_buckets]) {
return h.num_buckets - 1;
}
return std::upper_bound(grpc_stats_histo_bucket_boundaries[i],
grpc_stats_histo_bucket_boundaries[i] +
grpc_stats_histo_buckets[i],
j) -
grpc_stats_histo_bucket_boundaries[i] - 1;
return std::upper_bound(h.bucket_boundaries,
h.bucket_boundaries + h.num_buckets, value) -
h.bucket_boundaries - 1;
}
class HistogramTest : public ::testing::TestWithParam<int> {};
TEST_P(HistogramTest, CheckBucket) {
const int kHistogram = GetParam();
int max_bucket_boundary =
grpc_stats_histo_bucket_boundaries[kHistogram]
[grpc_stats_histo_buckets[kHistogram] -
1];
const GlobalStats::Histogram kHistogram =
static_cast<GlobalStats::Histogram>(GetParam());
auto some_stats = std::make_unique<GlobalStats>();
auto view = some_stats->histogram(kHistogram);
const int max_bucket_boundary = view.bucket_boundaries[view.num_buckets];
for (int i = -1000; i < max_bucket_boundary + 1000; i++) {
ASSERT_EQ(FindExpectedBucket(kHistogram, i),
grpc_stats_get_bucket[kHistogram](i))
ASSERT_EQ(FindExpectedBucket(view, i), view.bucket_for(i))
<< "i=" << i << " expect_bucket="
<< grpc_stats_histo_bucket_boundaries[kHistogram]
[FindExpectedBucket(kHistogram, i)]
<< " actual_bucket="
<< grpc_stats_histo_bucket_boundaries[kHistogram]
[grpc_stats_get_bucket[kHistogram](
i)];
}
}
TEST_P(HistogramTest, IncHistogram) {
const int kHistogram = GetParam();
std::queue<std::thread> threads;
auto run = [kHistogram](const std::vector<int>& test_values,
int expected_bucket) {
grpc_core::ExecCtx exec_ctx;
for (auto j : test_values) {
std::unique_ptr<Snapshot> snapshot(new Snapshot);
grpc_stats_inc_histogram_value(kHistogram, j);
auto delta = snapshot->delta();
EXPECT_EQ(
delta
.histograms[grpc_stats_histo_start[kHistogram] + expected_bucket],
1)
<< "\nhistogram:" << kHistogram
<< "\nexpected_bucket:" << expected_bucket << "\nj:" << j;
}
};
// largest bucket boundary for current histogram type.
int max_bucket_boundary =
grpc_stats_histo_bucket_boundaries[kHistogram]
[grpc_stats_histo_buckets[kHistogram] -
1];
std::map<int /* expected_bucket */, std::vector<int> /* test_values */>
test_values_by_expected_bucket;
std::random_device rd;
std::uniform_int_distribution<int> dist(-1000, max_bucket_boundary + 1000);
for (int i = 0; i < 100; i++) {
int j = dist(rd);
int expected_bucket = FindExpectedBucket(kHistogram, j);
test_values_by_expected_bucket[expected_bucket].push_back(j);
}
for (auto& p : test_values_by_expected_bucket) {
while (threads.size() >= 10) {
threads.front().join();
threads.pop();
}
threads.emplace(
[test_values = std::move(p.second), run,
cur_bucket = p.first]() mutable { run(test_values, cur_bucket); });
}
while (!threads.empty()) {
threads.front().join();
threads.pop();
<< view.bucket_boundaries[FindExpectedBucket(view, i)]
<< " actual_bucket=" << view.bucket_boundaries[view.bucket_for(i)];
}
}
INSTANTIATE_TEST_SUITE_P(HistogramTestCases, HistogramTest,
::testing::Range<int>(0, GRPC_STATS_HISTOGRAM_COUNT));
INSTANTIATE_TEST_SUITE_P(
HistogramTestCases, HistogramTest,
::testing::Range<int>(0, static_cast<int>(GlobalStats::Histogram::COUNT)));
} // namespace testing
} // namespace grpc
} // namespace grpc_core
int main(int argc, char** argv) {
grpc::testing::TestEnvironment env(&argc, argv);

@ -19,6 +19,8 @@
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <memory>
#include <string>
#include <grpc/grpc.h>
@ -26,10 +28,10 @@
#include <grpc/slice.h>
#include <grpc/status.h>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/end2end/end2end_tests.h"
#include "test/core/util/test_config.h"
@ -116,12 +118,8 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_slice details;
int was_cancelled = 2;
char* peer;
grpc_stats_data* before =
static_cast<grpc_stats_data*>(gpr_malloc(sizeof(grpc_stats_data)));
grpc_stats_data* after =
static_cast<grpc_stats_data*>(gpr_malloc(sizeof(grpc_stats_data)));
grpc_stats_collect(before);
auto before = grpc_core::global_stats().Collect();
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
@ -236,24 +234,19 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_call_unref(c);
grpc_call_unref(s);
int expected_calls = 1;
uint64_t expected_calls = 1;
if (config.feature_mask & FEATURE_MASK_SUPPORTS_REQUEST_PROXYING) {
expected_calls *= 2;
}
grpc_stats_collect(after);
auto after = grpc_core::global_stats().Collect();
gpr_log(GPR_DEBUG, "%s", grpc_stats_data_as_json(after).c_str());
gpr_log(GPR_DEBUG, "%s", grpc_core::StatsAsJson(after.get()).c_str());
GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] -
before->counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] ==
GPR_ASSERT(after->client_calls_created - before->client_calls_created ==
expected_calls);
GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] -
before->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] ==
GPR_ASSERT(after->server_calls_created - before->server_calls_created ==
expected_calls);
gpr_free(before);
gpr_free(after);
}
static void test_invoke_simple_request(grpc_end2end_test_config config) {

@ -33,7 +33,6 @@ namespace grpc {
namespace testing {
static void BM_Alarm_Tag_Immediate(benchmark::State& state) {
TrackCounters track_counters;
CompletionQueue cq;
Alarm alarm;
void* output_tag;
@ -43,7 +42,6 @@ static void BM_Alarm_Tag_Immediate(benchmark::State& state) {
alarm.Set(&cq, deadline, nullptr);
cq.Next(&output_tag, &ok);
}
track_counters.Finish(state);
}
BENCHMARK(BM_Alarm_Tag_Immediate);

@ -60,12 +60,10 @@ static auto* g_memory_allocator = new grpc_core::MemoryAllocator(
void BM_Zalloc(benchmark::State& state) {
// speed of light for call creation is zalloc, so benchmark a few interesting
// sizes
TrackCounters track_counters;
size_t sz = state.range(0);
for (auto _ : state) {
gpr_free(gpr_zalloc(sz));
}
track_counters.Finish(state);
}
BENCHMARK(BM_Zalloc)
->Arg(64)
@ -116,7 +114,6 @@ class LameChannel : public BaseChannelFixture {
template <class Fixture>
static void BM_CallCreateDestroy(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@ -128,7 +125,6 @@ static void BM_CallCreateDestroy(benchmark::State& state) {
deadline, nullptr));
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
}
BENCHMARK_TEMPLATE(BM_CallCreateDestroy, InsecureChannel);
@ -142,7 +138,6 @@ static void* tag(int i) {
}
static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
TrackCounters track_counters;
auto stub =
grpc::testing::EchoTestService::NewStub(grpc::CreateChannelInternal(
"",
@ -163,15 +158,12 @@ static void BM_LameChannelCallCreateCpp(benchmark::State& state) {
GPR_ASSERT(cq.Next(&t, &ok));
GPR_ASSERT(ok);
}
track_counters.Finish(state);
}
BENCHMARK(BM_LameChannelCallCreateCpp);
static void do_nothing(void* /*ignored*/) {}
static void BM_LameChannelCallCreateCore(benchmark::State& state) {
TrackCounters track_counters;
grpc_channel* channel;
grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
@ -238,13 +230,10 @@ static void BM_LameChannelCallCreateCore(benchmark::State& state) {
grpc_channel_destroy(channel);
grpc_completion_queue_destroy(cq);
grpc_slice_unref(send_request_slice);
track_counters.Finish(state);
}
BENCHMARK(BM_LameChannelCallCreateCore);
static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
TrackCounters track_counters;
grpc_channel* channel;
grpc_completion_queue* cq;
grpc_metadata_array initial_metadata_recv;
@ -320,7 +309,6 @@ static void BM_LameChannelCallCreateCoreSeparateBatch(benchmark::State& state) {
grpc_channel_destroy(channel);
grpc_completion_queue_destroy(cq);
grpc_slice_unref(send_request_slice);
track_counters.Finish(state);
}
BENCHMARK(BM_LameChannelCallCreateCoreSeparateBatch);
@ -508,7 +496,6 @@ class SendEmptyMetadata {
// perform on said filter.
template <class Fixture, class TestOp>
static void BM_IsolatedFilter(benchmark::State& state) {
TrackCounters track_counters;
Fixture fixture;
std::ostringstream label;
FakeClientChannelFactory fake_client_channel_factory;
@ -582,7 +569,6 @@ static void BM_IsolatedFilter(benchmark::State& state) {
gpr_free(call_stack);
state.SetLabel(label.str());
track_counters.Finish(state);
}
typedef Fixture<nullptr, 0> NoFilter;
@ -701,7 +687,7 @@ static const grpc_channel_filter isolated_call_filter = {
"isolated_call_filter"};
} // namespace isolated_call_filter
class IsolatedCallFixture : public TrackCounters {
class IsolatedCallFixture {
public:
IsolatedCallFixture() {
// We are calling grpc_channel_stack_builder_create() instead of
@ -725,10 +711,9 @@ class IsolatedCallFixture : public TrackCounters {
cq_ = grpc_completion_queue_create_for_next(nullptr);
}
void Finish(benchmark::State& state) override {
void Finish(benchmark::State&) {
grpc_completion_queue_destroy(cq_);
grpc_channel_destroy(channel_);
TrackCounters::Finish(state);
}
grpc_channel* channel() const { return channel_; }

@ -59,19 +59,15 @@ static grpc_slice MakeSlice(const std::vector<uint8_t>& bytes) {
//
static void BM_HpackEncoderInitDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
for (auto _ : state) {
grpc_core::HPackCompressor c;
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_HpackEncoderInitDestroy);
static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
grpc_core::Timestamp saved_now = grpc_core::Timestamp::Now();
@ -99,22 +95,11 @@ static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
}
grpc_slice_buffer_destroy(&outbuf);
std::ostringstream label;
label << "framing_bytes/iter:"
<< (static_cast<double>(stats.framing_bytes) /
static_cast<double>(state.iterations()))
<< " header_bytes/iter:"
<< (static_cast<double>(stats.header_bytes) /
static_cast<double>(state.iterations()));
track_counters.AddLabel(label.str());
track_counters.Finish(state);
}
BENCHMARK(BM_HpackEncoderEncodeDeadline);
template <class Fixture>
static void BM_HpackEncoderEncodeHeader(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
static bool logged_representative_output = false;
@ -150,16 +135,6 @@ static void BM_HpackEncoderEncodeHeader(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
}
grpc_slice_buffer_destroy(&outbuf);
std::ostringstream label;
label << "framing_bytes/iter:"
<< (static_cast<double>(stats.framing_bytes) /
static_cast<double>(state.iterations()))
<< " header_bytes/iter:"
<< (static_cast<double>(stats.header_bytes) /
static_cast<double>(state.iterations()));
track_counters.AddLabel(label.str());
track_counters.Finish(state);
}
namespace hpack_encoder_fixtures {
@ -346,20 +321,16 @@ BENCHMARK_TEMPLATE(BM_HpackEncoderEncodeHeader,
//
static void BM_HpackParserInitDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
for (auto _ : state) {
{ grpc_core::HPackParser(); }
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_HpackParserInitDestroy);
template <class Fixture>
static void BM_HpackParserParseHeader(benchmark::State& state) {
TrackCounters track_counters;
std::vector<grpc_slice> init_slices = Fixture::GetInitSlices();
std::vector<grpc_slice> benchmark_slices = Fixture::GetBenchmarkSlices();
grpc_core::ExecCtx exec_ctx;
@ -402,8 +373,6 @@ static void BM_HpackParserParseHeader(benchmark::State& state) {
for (auto slice : init_slices) grpc_slice_unref(slice);
for (auto slice : benchmark_slices) grpc_slice_unref(slice);
arena->Destroy();
track_counters.Finish(state);
}
namespace hpack_parser_fixtures {

@ -265,7 +265,6 @@ std::vector<std::unique_ptr<gpr_event>> done_events;
static void BM_StreamCreateDestroy(benchmark::State& state) {
grpc_core::ExecCtx exec_ctx;
TrackCounters track_counters;
Fixture f(grpc::ChannelArguments(), true);
auto* s = new Stream(&f);
grpc_transport_stream_op_batch op;
@ -286,7 +285,6 @@ static void BM_StreamCreateDestroy(benchmark::State& state) {
});
grpc_core::Closure::Run(DEBUG_LOCATION, next.get(), absl::OkStatus());
f.FlushExecCtx();
track_counters.Finish(state);
}
BENCHMARK(BM_StreamCreateDestroy);
@ -318,7 +316,6 @@ class RepresentativeClientInitialMetadata {
template <class Metadata>
static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
Fixture f(grpc::ChannelArguments(), true);
auto* s = new Stream(&f);
@ -362,13 +359,11 @@ static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, start.get(), absl::OkStatus());
f.FlushExecCtx();
gpr_event_wait(&bm_done, gpr_inf_future(GPR_CLOCK_REALTIME));
track_counters.Finish(state);
}
BENCHMARK_TEMPLATE(BM_StreamCreateSendInitialMetadataDestroy,
RepresentativeClientInitialMetadata);
static void BM_TransportEmptyOp(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
Fixture f(grpc::ChannelArguments(), true);
auto* s = new Stream(&f);
@ -406,7 +401,6 @@ static void BM_TransportEmptyOp(benchmark::State& state) {
s->DestroyThen(
MakeOnceClosure([s](grpc_error_handle /*error*/) { delete s; }));
f.FlushExecCtx();
track_counters.Finish(state);
}
BENCHMARK(BM_TransportEmptyOp);

@ -33,40 +33,32 @@
#include "test/cpp/util/test_config.h"
static void BM_NoOpExecCtx(benchmark::State& state) {
TrackCounters track_counters;
for (auto _ : state) {
grpc_core::ExecCtx exec_ctx;
}
track_counters.Finish(state);
}
BENCHMARK(BM_NoOpExecCtx);
static void BM_WellFlushed(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
for (auto _ : state) {
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_WellFlushed);
static void DoNothing(void* /*arg*/, grpc_error_handle /*error*/) {}
static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
for (auto _ : state) {
benchmark::DoNotOptimize(
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx));
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureInitAgainstExecCtx);
static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner = grpc_combiner_create();
grpc_closure c;
grpc_core::ExecCtx exec_ctx;
@ -75,26 +67,20 @@ static void BM_ClosureInitAgainstCombiner(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, nullptr));
}
GRPC_COMBINER_UNREF(combiner, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureInitAgainstCombiner);
static void BM_ClosureRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
for (auto _ : state) {
grpc_core::Closure::Run(DEBUG_LOCATION, &c, absl::OkStatus());
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureRun);
static void BM_ClosureCreateAndRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
for (auto _ : state) {
grpc_core::Closure::Run(
@ -102,13 +88,10 @@ static void BM_ClosureCreateAndRun(benchmark::State& state) {
GRPC_CLOSURE_CREATE(DoNothing, nullptr, grpc_schedule_on_exec_ctx),
absl::OkStatus());
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureCreateAndRun);
static void BM_ClosureInitAndRun(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
grpc_closure c;
for (auto _ : state) {
@ -117,13 +100,10 @@ static void BM_ClosureInitAndRun(benchmark::State& state) {
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx),
absl::OkStatus());
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureInitAndRun);
static void BM_ClosureSchedOnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
grpc_core::ExecCtx exec_ctx;
@ -131,13 +111,10 @@ static void BM_ClosureSchedOnExecCtx(benchmark::State& state) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, &c, absl::OkStatus());
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSchedOnExecCtx);
static void BM_ClosureSched2OnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c1;
grpc_closure c2;
GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
@ -148,13 +125,10 @@ static void BM_ClosureSched2OnExecCtx(benchmark::State& state) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, &c2, absl::OkStatus());
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched2OnExecCtx);
static void BM_ClosureSched3OnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_closure c1;
grpc_closure c2;
grpc_closure c3;
@ -168,13 +142,10 @@ static void BM_ClosureSched3OnExecCtx(benchmark::State& state) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, &c3, absl::OkStatus());
grpc_core::ExecCtx::Get()->Flush();
}
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched3OnExecCtx);
static void BM_AcquireMutex(benchmark::State& state) {
TrackCounters track_counters;
// for comparison with the combiner stuff below
gpr_mu mu;
gpr_mu_init(&mu);
@ -185,13 +156,10 @@ static void BM_AcquireMutex(benchmark::State& state) {
gpr_mu_unlock(&mu);
}
gpr_mu_destroy(&mu);
track_counters.Finish(state);
}
BENCHMARK(BM_AcquireMutex);
static void BM_TryAcquireMutex(benchmark::State& state) {
TrackCounters track_counters;
// for comparison with the combiner stuff below
gpr_mu mu;
gpr_mu_init(&mu);
@ -205,13 +173,10 @@ static void BM_TryAcquireMutex(benchmark::State& state) {
}
}
gpr_mu_destroy(&mu);
track_counters.Finish(state);
}
BENCHMARK(BM_TryAcquireMutex);
static void BM_AcquireSpinlock(benchmark::State& state) {
TrackCounters track_counters;
// for comparison with the combiner stuff below
gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
grpc_core::ExecCtx exec_ctx;
@ -220,13 +185,10 @@ static void BM_AcquireSpinlock(benchmark::State& state) {
DoNothing(nullptr, absl::OkStatus());
gpr_spinlock_unlock(&mu);
}
track_counters.Finish(state);
}
BENCHMARK(BM_AcquireSpinlock);
static void BM_TryAcquireSpinlock(benchmark::State& state) {
TrackCounters track_counters;
// for comparison with the combiner stuff below
gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
grpc_core::ExecCtx exec_ctx;
@ -238,13 +200,10 @@ static void BM_TryAcquireSpinlock(benchmark::State& state) {
abort();
}
}
track_counters.Finish(state);
}
BENCHMARK(BM_TryAcquireSpinlock);
static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner = grpc_combiner_create();
grpc_closure c;
GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, nullptr);
@ -254,13 +213,10 @@ static void BM_ClosureSchedOnCombiner(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
}
GRPC_COMBINER_UNREF(combiner, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSchedOnCombiner);
static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
@ -273,13 +229,10 @@ static void BM_ClosureSched2OnCombiner(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
}
GRPC_COMBINER_UNREF(combiner, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched2OnCombiner);
static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner = grpc_combiner_create();
grpc_closure c1;
grpc_closure c2;
@ -295,13 +248,10 @@ static void BM_ClosureSched3OnCombiner(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
}
GRPC_COMBINER_UNREF(combiner, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched3OnCombiner);
static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner1 = grpc_combiner_create();
grpc_core::Combiner* combiner2 = grpc_combiner_create();
grpc_closure c1;
@ -316,13 +266,10 @@ static void BM_ClosureSched2OnTwoCombiners(benchmark::State& state) {
}
GRPC_COMBINER_UNREF(combiner1, "finished");
GRPC_COMBINER_UNREF(combiner2, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched2OnTwoCombiners);
static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::Combiner* combiner1 = grpc_combiner_create();
grpc_core::Combiner* combiner2 = grpc_combiner_create();
grpc_closure c1;
@ -343,8 +290,6 @@ static void BM_ClosureSched4OnTwoCombiners(benchmark::State& state) {
}
GRPC_COMBINER_UNREF(combiner1, "finished");
GRPC_COMBINER_UNREF(combiner2, "finished");
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureSched4OnTwoCombiners);
@ -380,12 +325,10 @@ class Rescheduler {
};
static void BM_ClosureReschedOnExecCtx(benchmark::State& state) {
TrackCounters track_counters;
grpc_core::ExecCtx exec_ctx;
Rescheduler r(state);
r.ScheduleFirst();
grpc_core::ExecCtx::Get()->Flush();
track_counters.Finish(state);
}
BENCHMARK(BM_ClosureReschedOnExecCtx);

@ -35,35 +35,29 @@ namespace grpc {
namespace testing {
static void BM_CreateDestroyCpp(benchmark::State& state) {
TrackCounters track_counters;
for (auto _ : state) {
CompletionQueue cq;
}
track_counters.Finish(state);
}
BENCHMARK(BM_CreateDestroyCpp);
/* Create cq using a different constructor */
static void BM_CreateDestroyCpp2(benchmark::State& state) {
TrackCounters track_counters;
for (auto _ : state) {
grpc_completion_queue* core_cq =
grpc_completion_queue_create_for_next(nullptr);
CompletionQueue cq(core_cq);
}
track_counters.Finish(state);
}
BENCHMARK(BM_CreateDestroyCpp2);
static void BM_CreateDestroyCore(benchmark::State& state) {
TrackCounters track_counters;
for (auto _ : state) {
// TODO(sreek): Templatize this benchmark and pass completion type and
// polling type as parameters
grpc_completion_queue_destroy(
grpc_completion_queue_create_for_next(nullptr));
}
track_counters.Finish(state);
}
BENCHMARK(BM_CreateDestroyCore);
@ -83,7 +77,6 @@ class PhonyTag final : public internal::CompletionQueueTag {
};
static void BM_Pass1Cpp(benchmark::State& state) {
TrackCounters track_counters;
CompletionQueue cq;
grpc_completion_queue* c_cq = cq.cq();
for (auto _ : state) {
@ -98,12 +91,10 @@ static void BM_Pass1Cpp(benchmark::State& state) {
bool ok;
cq.Next(&tag, &ok);
}
track_counters.Finish(state);
}
BENCHMARK(BM_Pass1Cpp);
static void BM_Pass1Core(benchmark::State& state) {
TrackCounters track_counters;
// TODO(sreek): Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@ -117,12 +108,10 @@ static void BM_Pass1Core(benchmark::State& state) {
grpc_completion_queue_next(cq, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
}
BENCHMARK(BM_Pass1Core);
static void BM_Pluck1Core(benchmark::State& state) {
TrackCounters track_counters;
// TODO(sreek): Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_pluck(nullptr);
gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
@ -136,12 +125,10 @@ static void BM_Pluck1Core(benchmark::State& state) {
grpc_completion_queue_pluck(cq, nullptr, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
}
BENCHMARK(BM_Pluck1Core);
static void BM_EmptyCore(benchmark::State& state) {
TrackCounters track_counters;
// TODO(sreek): Templatize this benchmark and pass polling_type as a param
grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
gpr_timespec deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
@ -149,7 +136,6 @@ static void BM_EmptyCore(benchmark::State& state) {
grpc_completion_queue_next(cq, deadline, nullptr);
}
grpc_completion_queue_destroy(cq);
track_counters.Finish(state);
}
BENCHMARK(BM_EmptyCore);
@ -202,7 +188,6 @@ class ShutdownCallback : public grpc_completion_queue_functor {
};
static void BM_Callback_CQ_Pass1Core(benchmark::State& state) {
TrackCounters track_counters;
int iteration = 0, current_iterations = 0;
TagCallback tag_cb(&iteration);
gpr_mu_init(&mu);
@ -252,14 +237,12 @@ static void BM_Callback_CQ_Pass1Core(benchmark::State& state) {
GPR_ASSERT(got_shutdown);
GPR_ASSERT(iteration == static_cast<int>(state.iterations()));
track_counters.Finish(state);
gpr_cv_destroy(&cv);
gpr_mu_destroy(&mu);
gpr_cv_destroy(&shutdown_cv);
gpr_mu_destroy(&shutdown_mu);
}
static void BM_Callback_CQ_Pass1CoreHeapCompletion(benchmark::State& state) {
TrackCounters track_counters;
int iteration = 0, current_iterations = 0;
TagCallback tag_cb(&iteration);
gpr_mu_init(&mu);
@ -297,7 +280,6 @@ static void BM_Callback_CQ_Pass1CoreHeapCompletion(benchmark::State& state) {
GPR_ASSERT(got_shutdown);
GPR_ASSERT(iteration == static_cast<int>(state.iterations()));
track_counters.Finish(state);
gpr_cv_destroy(&cv);
gpr_mu_destroy(&mu);
gpr_cv_destroy(&shutdown_cv);

@ -174,17 +174,12 @@ static void BM_Cq_Throughput(benchmark::State& state) {
}
gpr_mu_unlock(&g_mu);
// Use a TrackCounters object to monitor the gRPC performance statistics
// (optionally including low-level counters) before and after the test
TrackCounters track_counters;
for (auto _ : state) {
GPR_ASSERT(grpc_completion_queue_next(g_cq, deadline, nullptr).type ==
GRPC_OP_COMPLETE);
}
state.SetItemsProcessed(state.iterations());
track_counters.Finish(state);
gpr_mu_lock(&g_mu);
g_threads_active--;

@ -47,7 +47,6 @@ static void shutdown_ps(void* ps, grpc_error_handle /*error*/) {
}
static void BM_CreateDestroyPollset(benchmark::State& state) {
TrackCounters track_counters;
size_t ps_sz = grpc_pollset_size();
grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_malloc(ps_sz));
gpr_mu* mu;
@ -65,7 +64,6 @@ static void BM_CreateDestroyPollset(benchmark::State& state) {
}
grpc_core::ExecCtx::Get()->Flush();
gpr_free(ps);
track_counters.Finish(state);
}
BENCHMARK(BM_CreateDestroyPollset);
@ -73,7 +71,6 @@ BENCHMARK(BM_CreateDestroyPollset);
static void BM_PollEmptyPollset_SpeedOfLight(benchmark::State& state) {
// equivalent to BM_PollEmptyPollset, but just use the OS primitives to guage
// what the speed of light would be if we abstracted perfectly
TrackCounters track_counters;
int epfd = epoll_create1(0);
GPR_ASSERT(epfd != -1);
size_t nev = state.range(0);
@ -94,7 +91,6 @@ static void BM_PollEmptyPollset_SpeedOfLight(benchmark::State& state) {
}
close(epfd);
delete[] ev;
track_counters.Finish(state);
}
BENCHMARK(BM_PollEmptyPollset_SpeedOfLight)
->Args({1, 0})
@ -110,7 +106,6 @@ BENCHMARK(BM_PollEmptyPollset_SpeedOfLight)
#endif
static void BM_PollEmptyPollset(benchmark::State& state) {
TrackCounters track_counters;
size_t ps_sz = grpc_pollset_size();
grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
gpr_mu* mu;
@ -127,12 +122,10 @@ static void BM_PollEmptyPollset(benchmark::State& state) {
gpr_mu_unlock(mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_free(ps);
track_counters.Finish(state);
}
BENCHMARK(BM_PollEmptyPollset);
static void BM_PollAddFd(benchmark::State& state) {
TrackCounters track_counters;
size_t ps_sz = grpc_pollset_size();
grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
gpr_mu* mu;
@ -155,7 +148,6 @@ static void BM_PollAddFd(benchmark::State& state) {
gpr_mu_unlock(mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_free(ps);
track_counters.Finish(state);
}
BENCHMARK(BM_PollAddFd);
@ -181,7 +173,6 @@ TestClosure* MakeTestClosure(F f) {
static void BM_SingleThreadPollOneFd_SpeedOfLight(benchmark::State& state) {
// equivalent to BM_PollEmptyPollset, but just use the OS primitives to guage
// what the speed of light would be if we abstracted perfectly
TrackCounters track_counters;
int epfd = epoll_create1(0);
GPR_ASSERT(epfd != -1);
epoll_event ev[100];
@ -206,13 +197,11 @@ static void BM_SingleThreadPollOneFd_SpeedOfLight(benchmark::State& state) {
}
close(fd);
close(epfd);
track_counters.Finish(state);
}
BENCHMARK(BM_SingleThreadPollOneFd_SpeedOfLight);
#endif
static void BM_SingleThreadPollOneFd(benchmark::State& state) {
TrackCounters track_counters;
size_t ps_sz = grpc_pollset_size();
grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
gpr_mu* mu;
@ -248,7 +237,6 @@ static void BM_SingleThreadPollOneFd(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
grpc_wakeup_fd_destroy(&wakeup_fd);
gpr_free(ps);
track_counters.Finish(state);
delete continue_closure;
}
BENCHMARK(BM_SingleThreadPollOneFd);

@ -138,7 +138,6 @@ static void BM_CallbackBidiStreaming(benchmark::State& state) {
BidiClient test{&state, stub_.get(), &cli_ctx, &request, &response};
test.Await();
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(2 * message_size * max_ping_pongs *
state.iterations());

@ -89,7 +89,6 @@ static void BM_CallbackUnaryPingPong(benchmark::State& state) {
while (!done) {
cv.wait(l);
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(request_msgs_size * state.iterations() +
response_msgs_size * state.iterations());

@ -61,7 +61,10 @@ class FixtureConfiguration {
}
};
class BaseFixture : public TrackCounters {};
class BaseFixture {
public:
virtual ~BaseFixture() = default;
};
class FullstackFixture : public BaseFixture {
public:
@ -94,13 +97,6 @@ class FullstackFixture : public BaseFixture {
}
}
void AddToLabel(std::ostream& out, benchmark::State& state) override {
BaseFixture::AddToLabel(out, state);
out << " polls/iter:"
<< static_cast<double>(grpc_get_cq_poll_num(this->cq()->cq())) /
state.iterations();
}
ServerCompletionQueue* cq() { return cq_.get(); }
std::shared_ptr<Channel> channel() { return channel_; }
@ -226,13 +222,6 @@ class EndpointPairFixture : public BaseFixture {
}
}
void AddToLabel(std::ostream& out, benchmark::State& state) override {
BaseFixture::AddToLabel(out, state);
out << " polls/iter:"
<< static_cast<double>(grpc_get_cq_poll_num(this->cq()->cq())) /
state.iterations();
}
ServerCompletionQueue* cq() { return cq_.get(); }
std::shared_ptr<Channel> channel() { return channel_; }
@ -276,13 +265,6 @@ class InProcessCHTTP2WithExplicitStats : public EndpointPairFixture {
}
}
void AddToLabel(std::ostream& out, benchmark::State& state) override {
EndpointPairFixture::AddToLabel(out, state);
out << " writes/iter:"
<< static_cast<double>(gpr_atm_no_barrier_load(&stats_->num_writes)) /
static_cast<double>(state.iterations());
}
private:
grpc_passthru_endpoint_stats* stats_;

@ -132,7 +132,6 @@ static void BM_StreamingPingPong(benchmark::State& state) {
}
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2);
}
@ -218,7 +217,6 @@ static void BM_StreamingPingPongMsgs(benchmark::State& state) {
GPR_ASSERT(recv_status.ok());
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(msg_size * state.iterations() * 2);
}
@ -394,7 +392,6 @@ static void BM_StreamingPingPongWithCoalescingApi(benchmark::State& state) {
}
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(msg_size * state.iterations() * max_ping_pongs * 2);
}

@ -101,7 +101,6 @@ static void BM_PumpStreamClientToServer(benchmark::State& state) {
}
GPR_ASSERT(final_status.ok());
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(state.range(0) * state.iterations());
}
@ -158,7 +157,6 @@ static void BM_PumpStreamServerToClient(benchmark::State& state) {
need_tags &= ~(1 << i);
}
}
fixture->Finish(state);
fixture.reset();
state.SetBytesProcessed(state.range(0) * state.iterations());
}

@ -102,7 +102,6 @@ static void BM_UnaryPingPong(benchmark::State& state) {
service.RequestEcho(&senv->ctx, &senv->recv_request, &senv->response_writer,
fixture->cq(), fixture->cq(), tag(slot));
}
fixture->Finish(state);
fixture.reset();
server_env[0]->~ServerEnv();
server_env[1]->~ServerEnv();

@ -40,40 +40,3 @@ LibraryInitializer& LibraryInitializer::get() {
GPR_ASSERT(g_libraryInitializer != nullptr);
return *g_libraryInitializer;
}
void TrackCounters::Finish(benchmark::State& state) {
std::ostringstream out;
for (const auto& l : labels_) {
out << l << ' ';
}
AddToLabel(out, state);
std::string label = out.str();
if (label.length() && label[0] == ' ') {
label = label.substr(1);
}
state.SetLabel(label.c_str());
}
void TrackCounters::AddLabel(const std::string& label) {
labels_.push_back(label);
}
void TrackCounters::AddToLabel(std::ostream& out, benchmark::State& state) {
grpc_stats_data stats_end;
grpc_stats_collect(&stats_end);
grpc_stats_data stats;
grpc_stats_diff(&stats_end, &stats_begin_, &stats);
for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
out << " " << grpc_stats_counter_name[i] << "/iter:"
<< (static_cast<double>(stats.counters[i]) /
static_cast<double>(state.iterations()));
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
out << " " << grpc_stats_histogram_name[i] << "-median:"
<< grpc_stats_histo_percentile(
&stats, static_cast<grpc_stats_histograms>(i), 50.0)
<< " " << grpc_stats_histogram_name[i] << "-99p:"
<< grpc_stats_histo_percentile(
&stats, static_cast<grpc_stats_histograms>(i), 99.0);
}
}

@ -29,6 +29,7 @@
#include <grpcpp/impl/grpc_library.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
class LibraryInitializer {
public:
@ -41,17 +42,4 @@ class LibraryInitializer {
grpc::internal::GrpcLibrary init_lib_;
};
class TrackCounters {
public:
TrackCounters() { grpc_stats_collect(&stats_begin_); }
virtual ~TrackCounters() {}
virtual void Finish(benchmark::State& state);
virtual void AddLabel(const std::string& label);
virtual void AddToLabel(std::ostream& out, benchmark::State& state);
private:
grpc_stats_data stats_begin_;
std::vector<std::string> labels_;
};
#endif

@ -37,6 +37,7 @@
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/thd.h"

@ -52,7 +52,6 @@ grpc_cc_library(
":usage_timer",
"//:grpc",
"//:grpc++",
"//:grpc++_core_stats",
"//src/proto/grpc/testing:benchmark_service_proto",
"//src/proto/grpc/testing:control_proto",
"//src/proto/grpc/testing:payloads_proto",

@ -19,6 +19,8 @@
#ifndef TEST_QPS_CLIENT_H
#define TEST_QPS_CLIENT_H
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <condition_variable>
@ -38,7 +40,6 @@
#include <grpcpp/support/slice.h>
#include "src/core/lib/gprpp/env.h"
#include "src/cpp/util/core_stats.h"
#include "src/proto/grpc/testing/benchmark_service.grpc.pb.h"
#include "src/proto/grpc/testing/payloads.pb.h"
#include "test/cpp/qps/histogram.h"
@ -206,9 +207,6 @@ class Client {
}
}
grpc_stats_data core_stats;
grpc_stats_collect(&core_stats);
ClientStats stats;
latencies.FillProto(stats.mutable_latencies());
for (StatusHistogram::const_iterator it = statuses.begin();
@ -221,7 +219,6 @@ class Client {
stats.set_time_system(timer_result.system);
stats.set_time_user(timer_result.user);
stats.set_cq_poll_count(poll_count);
CoreStatsToProto(core_stats, stats.mutable_core_stats());
return stats;
}

@ -23,7 +23,6 @@
#include <grpc/support/log.h>
#include <grpcpp/client_context.h>
#include "src/cpp/util/core_stats.h"
#include "src/proto/grpc/testing/report_qps_scenario_service.grpc.pb.h"
#include "test/cpp/qps/driver.h"
#include "test/cpp/qps/parse_json.h"
@ -86,36 +85,6 @@ void GprLogReporter::ReportQPS(const ScenarioResult& result) {
gpr_log(GPR_INFO, "successful requests/second: %.1f",
result.summary().successful_requests_per_second());
}
for (int i = 0; i < result.client_stats_size(); i++) {
if (result.client_stats(i).has_core_stats()) {
ReportCoreStats("CLIENT", i, result.client_stats(i).core_stats());
}
}
for (int i = 0; i < result.server_stats_size(); i++) {
if (result.server_stats(i).has_core_stats()) {
ReportCoreStats("SERVER", i, result.server_stats(i).core_stats());
}
}
}
void GprLogReporter::ReportCoreStats(const char* name, int idx,
const grpc::core::Stats& stats) {
grpc_stats_data data;
ProtoToCoreStats(stats, &data);
for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
gpr_log(GPR_DEBUG, "%s[%d].%s = %" PRIdPTR, name, idx,
grpc_stats_counter_name[i], data.counters[i]);
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
gpr_log(GPR_DEBUG, "%s[%d].%s = %.1lf/%.1lf/%.1lf (50/95/99%%-ile)", name,
idx, grpc_stats_histogram_name[i],
grpc_stats_histo_percentile(
&data, static_cast<grpc_stats_histograms>(i), 50),
grpc_stats_histo_percentile(
&data, static_cast<grpc_stats_histograms>(i), 95),
grpc_stats_histo_percentile(
&data, static_cast<grpc_stats_histograms>(i), 99));
}
}
void GprLogReporter::ReportQPSPerCore(const ScenarioResult& result) {

@ -103,9 +103,6 @@ class GprLogReporter : public Reporter {
void ReportCpuUsage(const ScenarioResult& result) override;
void ReportPollCount(const ScenarioResult& result) override;
void ReportQueriesPerCpuSec(const ScenarioResult& result) override;
void ReportCoreStats(const char* name, int idx,
const grpc::core::Stats& stats);
};
/** Dumps the report to a JSON file. */

@ -28,7 +28,6 @@
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server_builder.h>
#include "src/cpp/util/core_stats.h"
#include "src/proto/grpc/testing/control.pb.h"
#include "src/proto/grpc/testing/messages.pb.h"
#include "test/core/end2end/data/ssl_test_data.h"
@ -65,9 +64,6 @@ class Server {
timer_result = timer_->Mark();
}
grpc_stats_data core_stats;
grpc_stats_collect(&core_stats);
ServerStats stats;
stats.set_time_elapsed(timer_result.wall);
stats.set_time_system(timer_result.system);
@ -75,7 +71,6 @@ class Server {
stats.set_total_cpu_time(timer_result.total_cpu_time);
stats.set_idle_cpu_time(timer_result.idle_cpu_time);
stats.set_cq_poll_count(poll_count);
CoreStatsToProto(core_stats, stats.mutable_core_stats());
return stats;
}

@ -206,14 +206,12 @@ def gen_bucket_code(shape):
type_for_uint_table(map_table))
last_code = (
(len(map_table) - 1) << shift_data[0]) + first_nontrivial_code
code += '// first_nontrivial_code=%d\n// last_code=%d [%f]\n' % (
first_nontrivial_code, last_code, u642dbl(last_code))
code += 'DblUint val;\n'
code += 'val.dbl = value;\n'
code += 'const int bucket = '
code += 'grpc_stats_table_%d[((val.uint - %dull) >> %d)];\n' % (
code += 'kStatsTable%d[((val.uint - %dull) >> %d)];\n' % (
map_table_idx, first_nontrivial_code, shift_data[0])
code += 'return bucket - (value < grpc_stats_table_%d[bucket]);' % bounds_idx
code += 'return bucket - (value < kStatsTable%d[bucket]);' % bounds_idx
cases.append((int(u642dbl(last_code)) + 1, code))
first_nontrivial_code = last_code
last = u642dbl(last_code) + 1
@ -239,6 +237,11 @@ shapes = set()
for histogram in inst_map['Histogram']:
shapes.add(Shape(max=histogram.max, buckets=histogram.buckets))
def snake_to_pascal(name):
return ''.join([x.capitalize() for x in name.split('_')])
with open('src/core/lib/debug/stats_data.h', 'w') as H:
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
@ -264,79 +267,114 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H:
print("#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H", file=H)
print(file=H)
print("#include <grpc/support/port_platform.h>", file=H)
print("#include <atomic>", file=H)
print("#include <memory>", file=H)
print("#include <stdint.h>", file=H)
print("#include \"src/core/lib/debug/histogram_view.h\"", file=H)
print("#include \"absl/strings/string_view.h\"", file=H)
print("#include \"src/core/lib/gprpp/per_cpu.h\"", file=H)
print(file=H)
print("// IWYU pragma: private, include \"src/core/lib/debug/stats.h\"",
file=H)
print(file=H)
print("namespace grpc_core {", file=H)
for typename, instances in sorted(inst_map.items()):
print("typedef enum {", file=H)
for inst in instances:
print(" GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper()),
file=H)
print(" GRPC_STATS_%s_COUNT" % (typename.upper()), file=H)
print("} grpc_stats_%ss;" % (typename.lower()), file=H)
print("extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" %
(typename.lower(), typename.upper()),
for shape in shapes:
print("class HistogramCollector_%d_%d;" % (shape.max, shape.buckets),
file=H)
print("extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" %
(typename.lower(), typename.upper()),
print("class Histogram_%d_%d {" % (shape.max, shape.buckets), file=H)
print(" public:", file=H)
print(" static int BucketFor(int value);", file=H)
print(" const uint64_t* buckets() const { return buckets_; }", file=H)
print(
" friend Histogram_%d_%d operator-(const Histogram_%d_%d& left, const Histogram_%d_%d& right);"
% (shape.max, shape.buckets, shape.max, shape.buckets, shape.max,
shape.buckets),
file=H)
print(" private:", file=H)
print(" friend class HistogramCollector_%d_%d;" %
(shape.max, shape.buckets),
file=H)
histo_start = []
histo_buckets = []
print("typedef enum {", file=H)
first_slot = 0
for histogram in inst_map['Histogram']:
histo_start.append(first_slot)
histo_buckets.append(histogram.buckets)
print(" GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," %
(histogram.name.upper(), first_slot),
print(" uint64_t buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print("class HistogramCollector_%d_%d {" % (shape.max, shape.buckets),
file=H)
print(" GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," %
(histogram.name.upper(), histogram.buckets),
print(" public:", file=H)
print(" void Increment(int value) {", file=H)
print(" buckets_[Histogram_%d_%d::BucketFor(value)]" %
(shape.max, shape.buckets),
file=H)
print(" .fetch_add(1, std::memory_order_relaxed);", file=H)
print(" }", file=H)
print(" void Collect(Histogram_%d_%d* result) const;" %
(shape.max, shape.buckets),
file=H)
first_slot += histogram.buckets
print(" GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot, file=H)
print("} grpc_stats_histogram_constants;", file=H)
print(" private:", file=H)
print(" std::atomic<uint64_t> buckets_[%d]{};" % shape.buckets, file=H)
print("};", file=H)
print("struct GlobalStats {", file=H)
print(" enum class Counter {", file=H)
for ctr in inst_map['Counter']:
print(("#define GRPC_STATS_INC_%s() " +
"GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") %
(ctr.name.upper(), ctr.name.upper()),
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" enum class Histogram {", file=H)
for ctr in inst_map['Histogram']:
print(" k%s," % snake_to_pascal(ctr.name), file=H)
print(" COUNT", file=H)
print(" };", file=H)
print(" GlobalStats();", file=H)
print(
" static const absl::string_view counter_name[static_cast<int>(Counter::COUNT)];",
file=H)
print(
" static const absl::string_view histogram_name[static_cast<int>(Histogram::COUNT)];",
file=H)
print(
" static const absl::string_view counter_doc[static_cast<int>(Counter::COUNT)];",
file=H)
print(
" static const absl::string_view histogram_doc[static_cast<int>(Histogram::COUNT)];",
file=H)
print(" union {", file=H)
print(" struct {", file=H)
for ctr in inst_map['Counter']:
print(" uint64_t %s;" % ctr.name, file=H)
print(" };", file=H)
print(" uint64_t counters[static_cast<int>(Counter::COUNT)];", file=H)
print(" };", file=H)
for ctr in inst_map['Histogram']:
print(" Histogram_%d_%d %s;" % (ctr.max, ctr.buckets, ctr.name),
file=H)
for histogram in inst_map['Histogram']:
print(" HistogramView histogram(Histogram which) const;", file=H)
print(
" std::unique_ptr<GlobalStats> Diff(const GlobalStats& other) const;",
file=H)
print("};", file=H)
print("class GlobalStatsCollector {", file=H)
print(" public:", file=H)
print(" std::unique_ptr<GlobalStats> Collect() const;", file=H)
for ctr in inst_map['Counter']:
print(
"#define GRPC_STATS_INC_%s(value) GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, grpc_core::BucketForHistogramValue_%d_%d(static_cast<int>(value)))"
% (histogram.name.upper(), histogram.name.upper(), histogram.max,
histogram.buckets),
" void Increment%s() { data_.this_cpu().%s.fetch_add(1, std::memory_order_relaxed); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H)
print("namespace grpc_core {", file=H)
for shape in shapes:
print("int BucketForHistogramValue_%d_%d(int value);" %
(shape.max, shape.buckets),
for ctr in inst_map['Histogram']:
print(
" void Increment%s(int value) { data_.this_cpu().%s.Increment(value); }"
% (snake_to_pascal(ctr.name), ctr.name),
file=H)
print(" private:", file=H)
print(" struct Data {", file=H)
for ctr in inst_map['Counter']:
print(" std::atomic<uint64_t> %s{0};" % ctr.name, file=H)
for ctr in inst_map['Histogram']:
print(" HistogramCollector_%d_%d %s;" %
(ctr.max, ctr.buckets, ctr.name),
file=H)
print(" };", file=H)
print(" PerCpu<Data> data_;", file=H)
print("};", file=H)
print("}", file=H)
for i, tbl in enumerate(static_tables):
print("extern const %s grpc_stats_table_%d[%d];" %
(tbl[0], i, len(tbl[1])),
file=H)
print("extern const int grpc_stats_histo_buckets[%d];" %
len(inst_map['Histogram']),
file=H)
print("extern const int grpc_stats_histo_start[%d];" %
len(inst_map['Histogram']),
file=H)
print("extern const int *const grpc_stats_histo_bucket_boundaries[%d];" %
len(inst_map['Histogram']),
file=H)
print("extern int (*const grpc_stats_get_bucket[%d])(int value);" %
len(inst_map['Histogram']),
file=H)
print(file=H)
print("#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */", file=H)
@ -363,9 +401,9 @@ with open('src/core/lib/debug/stats_data.cc', 'w') as C:
print("#include <grpc/support/port_platform.h>", file=C)
print(file=C)
print("#include \"src/core/lib/debug/stats.h\"", file=C)
print("#include \"src/core/lib/debug/stats_data.h\"", file=C)
print("#include <stdint.h>", file=C)
print("#include \"absl/memory/memory.h\"", file=C)
print(file=C)
histo_code = []
@ -375,50 +413,105 @@ with open('src/core/lib/debug/stats_data.cc', 'w') as C:
histo_bucket_boundaries[shape] = bounds_idx
histo_code.append(code)
print("namespace grpc_core {", file=C)
print("namespace { union DblUint { double dbl; uint64_t uint; }; }", file=C)
for typename, instances in sorted(inst_map.items()):
print("const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" %
(typename.lower(), typename.upper()),
for shape in shapes:
print(
"void HistogramCollector_%d_%d::Collect(Histogram_%d_%d* result) const {"
% (shape.max, shape.buckets, shape.max, shape.buckets),
file=C)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(
" result->buckets_[i] += buckets_[i].load(std::memory_order_relaxed);",
file=C)
print(" }", file=C)
print("}", file=C)
print(
"Histogram_%d_%d operator-(const Histogram_%d_%d& left, const Histogram_%d_%d& right) {"
% (shape.max, shape.buckets, shape.max, shape.buckets, shape.max,
shape.buckets),
file=C)
print(" Histogram_%d_%d result;" % (shape.max, shape.buckets), file=C)
print(" for (int i=0; i<%d; i++) {" % shape.buckets, file=C)
print(" result.buckets_[i] = left.buckets_[i] - right.buckets_[i];",
file=C)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
for typename, instances in sorted(inst_map.items()):
print(
"const absl::string_view GlobalStats::%s_name[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C)
for inst in instances:
print(" %s," % c_str(inst.name), file=C)
print("};", file=C)
print("const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" %
(typename.lower(), typename.upper()),
file=C)
print(
"const absl::string_view GlobalStats::%s_doc[static_cast<int>(%s::COUNT)] = {"
% (typename.lower(), typename),
file=C)
for inst in instances:
print(" %s," % c_str(inst.doc), file=C)
print("};", file=C)
print("namespace {", file=C)
for i, tbl in enumerate(static_tables):
print("const %s grpc_stats_table_%d[%d] = {%s};" %
print("const %s kStatsTable%d[%d] = {%s};" %
(tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1])),
file=C)
print("} // namespace", file=C)
print("namespace grpc_core {", file=C)
for shape, code in zip(shapes, histo_code):
print(("int BucketForHistogramValue_%d_%d(int value) {%s}") %
print(("int Histogram_%d_%d::BucketFor(int value) {%s}") %
(shape.max, shape.buckets, code),
file=C)
print("GlobalStats::GlobalStats() : %s {}" %
",".join("%s{0}" % ctr.name for ctr in inst_map['Counter']),
file=C)
print("HistogramView GlobalStats::histogram(Histogram which) const {",
file=C)
print(" switch (which) {", file=C)
print(" default: GPR_UNREACHABLE_CODE(return HistogramView());", file=C)
for inst in inst_map['Histogram']:
print(" case Histogram::k%s:" % snake_to_pascal(inst.name), file=C)
print(
" return HistogramView{&Histogram_%d_%d::BucketFor, kStatsTable%d, %d, %s.buckets()};"
% (inst.max, inst.buckets, histo_bucket_boundaries[Shape(
inst.max, inst.buckets)], inst.buckets, inst.name),
file=C)
print(" }", file=C)
print("}", file=C)
print(
"std::unique_ptr<GlobalStats> GlobalStatsCollector::Collect() const {",
file=C)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
print(" for (const auto& data : data_) {", file=C)
for ctr in inst_map['Counter']:
print(" result->%s += data.%s.load(std::memory_order_relaxed);" %
(ctr.name, ctr.name),
file=C)
for h in inst_map['Histogram']:
print(" data.%s.Collect(&result->%s);" % (h.name, h.name), file=C)
print(" }", file=C)
print(" return result;", file=C)
print("}", file=C)
print(
"const int grpc_stats_histo_buckets[%d] = {%s};" %
(len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets)),
"std::unique_ptr<GlobalStats> GlobalStats::Diff(const GlobalStats& other) const {",
file=C)
print("const int grpc_stats_histo_start[%d] = {%s};" %
(len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start)),
file=C)
print("const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" %
(len(inst_map['Histogram']), ','.join(
'grpc_stats_table_%d' %
histo_bucket_boundaries[Shape(h.max, h.buckets)]
for h in inst_map['Histogram'])),
file=C)
print("int (*const grpc_stats_get_bucket[%d])(int value) = {%s};" %
(len(inst_map['Histogram']), ','.join(
'grpc_core::BucketForHistogramValue_%d_%d' %
(histogram.max, histogram.buckets)
for histogram in inst_map['Histogram'])),
file=C)
print(" auto result = std::make_unique<GlobalStats>();", file=C)
for ctr in inst_map['Counter']:
print(" result->%s = %s - other.%s;" % (ctr.name, ctr.name, ctr.name),
file=C)
for h in inst_map['Histogram']:
print(" result->%s = %s - other.%s;" % (h.name, h.name, h.name),
file=C)
print(" return result;", file=C)
print("}", file=C)
print("}", file=C)

@ -1937,6 +1937,8 @@ src/core/lib/config/core_configuration.cc \
src/core/lib/config/core_configuration.h \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/event_log.h \
src/core/lib/debug/histogram_view.cc \
src/core/lib/debug/histogram_view.h \
src/core/lib/debug/stats.cc \
src/core/lib/debug/stats.h \
src/core/lib/debug/stats_data.cc \

@ -1725,6 +1725,8 @@ src/core/lib/config/core_configuration.cc \
src/core/lib/config/core_configuration.h \
src/core/lib/debug/event_log.cc \
src/core/lib/debug/event_log.h \
src/core/lib/debug/histogram_view.cc \
src/core/lib/debug/histogram_view.h \
src/core/lib/debug/stats.cc \
src/core/lib/debug/stats.h \
src/core/lib/debug/stats_data.cc \

Loading…
Cancel
Save