[stats] Cleanup & re-enable stats system (#30610)

* [stats] Cleanup stats system

* clear out optionality

* fix

* might as well...

* Automated change: Fix sanity tests

* clean out more unused stuff

* clean out more unused stuff

* Automated change: Fix sanity tests

Co-authored-by: ctiller <ctiller@users.noreply.github.com>
Branch: pull/30766/head
Author: Craig Tiller (committed 3 years ago via GitHub)
Commit: 5b6dac02ac (parent b8fde2ab47)
Changed files, with lines changed in each:
  src/core/ext/transport/chttp2/transport/chttp2_transport.cc (88)
  src/core/ext/transport/chttp2/transport/hpack_encoder.cc (15)
  src/core/ext/transport/chttp2/transport/hpack_parser.cc (4)
  src/core/ext/transport/chttp2/transport/writing.cc (12)
  src/core/lib/debug/stats.cc (35)
  src/core/lib/debug/stats.h (9)
  src/core/lib/debug/stats_data.cc (498)
  src/core/lib/debug/stats_data.h (464)
  src/core/lib/debug/stats_data.yaml (232)
  src/core/lib/debug/stats_data_bq_schema.sql (88)
  src/core/lib/iomgr/call_combiner.cc (4)
  src/core/lib/iomgr/ev_epoll1_linux.cc (23)
  src/core/lib/iomgr/ev_poll_posix.cc (2)
  src/core/lib/iomgr/iocp_windows.cc (1)
  src/core/lib/iomgr/tcp_posix.cc (13)
  src/core/lib/surface/completion_queue.cc (10)
  src/core/lib/surface/server.cc (6)
  test/core/debug/stats_test.cc (14)
  test/core/end2end/tests/simple_request.cc (5)
  test/cpp/microbenchmarks/helpers.cc (12)
  tools/codegen/core/gen_stats_data.py (10)
  tools/run_tests/performance/massage_qps_stats.py (369)
  tools/run_tests/performance/scenario_result_schema.json (1418)

@@ -59,7 +59,7 @@
#include "src/core/ext/transport/chttp2/transport/stream_map.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/stats_data.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/bitset.h"
#include "src/core/lib/gprpp/debug_location.h"
@@ -794,85 +794,12 @@ static void set_write_state(grpc_chttp2_transport* t,
}
}
static void inc_initiate_write_reason(
grpc_chttp2_initiate_write_reason reason) {
switch (reason) {
case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE();
break;
case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM();
break;
case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE();
break;
case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA();
break;
case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA();
break;
case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING();
break;
case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS();
break;
case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT();
break;
case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM();
break;
case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API();
break;
case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL();
break;
case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL();
break;
case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS();
break;
case GRPC_CHTTP2_INITIATE_WRITE_SETTINGS_ACK:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SETTINGS_ACK();
break;
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING();
break;
case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE();
break;
case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING();
break;
case GRPC_CHTTP2_INITIATE_WRITE_BDP_PING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING();
break;
case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING();
break;
case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED();
break;
case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE();
break;
case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM();
break;
}
}
void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
grpc_chttp2_initiate_write_reason reason) {
GPR_TIMER_SCOPE("grpc_chttp2_initiate_write", 0);
switch (t->write_state) {
case GRPC_CHTTP2_WRITE_STATE_IDLE:
inc_initiate_write_reason(reason);
set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING,
grpc_chttp2_initiate_write_reason_string(reason));
GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
@@ -934,9 +861,6 @@ static void write_action_begin_locked(void* gt,
r = grpc_chttp2_begin_write(t);
}
if (r.writing) {
if (r.partial) {
GRPC_STATS_INC_HTTP2_PARTIAL_WRITES();
}
set_write_state(t,
r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
: GRPC_CHTTP2_WRITE_STATE_WRITING,
@@ -956,7 +880,6 @@ static void write_action_begin_locked(void* gt,
continue_read_action_locked(t);
}
} else {
GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN();
set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing");
GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
}
@@ -1301,8 +1224,6 @@ static void perform_stream_op_locked(void* stream_op,
grpc_transport_stream_op_batch_payload* op_payload = op->payload;
grpc_chttp2_transport* t = s->t;
GRPC_STATS_INC_HTTP2_OP_BATCHES();
s->context = op->payload->context;
s->traced = op->is_traced;
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
@@ -1330,7 +1251,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->cancel_stream) {
GRPC_STATS_INC_HTTP2_OP_CANCEL();
grpc_chttp2_cancel_stream(t, s, op_payload->cancel_stream.cancel_error);
}
@@ -1338,7 +1258,6 @@ static void perform_stream_op_locked(void* stream_op,
if (t->is_client && t->channelz_socket != nullptr) {
t->channelz_socket->RecordStreamStartedFromLocal();
}
GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA();
GPR_ASSERT(s->send_initial_metadata_finished == nullptr);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
@@ -1396,7 +1315,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->send_message) {
GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE();
t->num_messages_in_next_write++;
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(
op->payload->send_message.send_message->Length());
@@ -1473,7 +1391,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->send_trailing_metadata) {
GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA();
GPR_ASSERT(s->send_trailing_metadata_finished == nullptr);
on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
@@ -1505,7 +1422,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->recv_initial_metadata) {
GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA();
GPR_ASSERT(s->recv_initial_metadata_ready == nullptr);
s->recv_initial_metadata_ready =
op_payload->recv_initial_metadata.recv_initial_metadata_ready;
@@ -1521,7 +1437,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->recv_message) {
GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE();
GPR_ASSERT(s->recv_message_ready == nullptr);
s->recv_message_ready = op_payload->recv_message.recv_message_ready;
s->recv_message = op_payload->recv_message.recv_message;
@@ -1533,7 +1448,6 @@ static void perform_stream_op_locked(void* stream_op,
}
if (op->recv_trailing_metadata) {
GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA();
GPR_ASSERT(s->collecting_stats == nullptr);
s->collecting_stats = op_payload->recv_trailing_metadata.collect_stats;
GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);

@@ -35,7 +35,6 @@
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
#include "src/core/ext/transport/chttp2/transport/hpack_encoder_table.h"
#include "src/core/ext/transport/chttp2/transport/varint.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/surface/validate_metadata.h"
#include "src/core/lib/transport/timeout_encoding.h"
@@ -158,7 +157,6 @@ uint8_t* HPackCompressor::Framer::AddTiny(size_t len) {
}
void HPackCompressor::Framer::EmitIndexed(uint32_t elem_index) {
GRPC_STATS_INC_HPACK_SEND_INDEXED();
VarintWriter<1> w(elem_index);
w.Write(0x80, AddTiny(w.length()));
}
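For context: EmitIndexed writes an RFC 7541 §6.1 "Indexed Header Field", a varint under a one-bit '1' prefix, so small table indices cost a single byte. A minimal sketch of the single-byte case (the index value here is illustrative):
// Indexed Header Field representation: 0b1xxxxxxx. Indices below 127 fit
// the 7-bit prefix directly; VarintWriter<1> above handles larger indices
// by spilling into continuation bytes.
uint8_t first_byte = 0x80 | 62;  // table index 62 -> 0xBE on the wire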
@@ -180,17 +178,14 @@ static WireValue GetWireValue(Slice value, bool true_binary_enabled,
bool is_bin_hdr) {
if (is_bin_hdr) {
if (true_binary_enabled) {
GRPC_STATS_INC_HPACK_SEND_BINARY();
return WireValue(0x00, true, std::move(value));
} else {
GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64();
return WireValue(0x80, false,
Slice(grpc_chttp2_base64_encode_and_huffman_compress(
value.c_slice())));
}
} else {
/* TODO(ctiller): opportunistically compress non-binary headers */
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
return WireValue(0x00, false, std::move(value));
}
}
@@ -269,8 +264,6 @@ class StringKey {
void HPackCompressor::Framer::EmitLitHdrWithNonBinaryStringKeyIncIdx(
Slice key_slice, Slice value_slice) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V();
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
StringKey key(std::move(key_slice));
key.WritePrefix(0x40, AddTiny(key.prefix_length()));
Add(key.key());
@@ -281,8 +274,6 @@ void HPackCompressor::Framer::EmitLitHdrWithNonBinaryStringKeyIncIdx(
void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyNotIdx(
Slice key_slice, Slice value_slice) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V();
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
StringKey key(std::move(key_slice));
key.WritePrefix(0x00, AddTiny(key.prefix_length()));
Add(key.key());
@@ -293,8 +284,6 @@ void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyNotIdx(
void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyIncIdx(
Slice key_slice, Slice value_slice) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V();
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
StringKey key(std::move(key_slice));
key.WritePrefix(0x40, AddTiny(key.prefix_length()));
Add(key.key());
@@ -305,8 +294,6 @@ void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyIncIdx(
void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyNotIdx(
uint32_t key_index, Slice value_slice) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX();
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
BinaryStringValue emit(std::move(value_slice), use_true_binary_metadata_);
VarintWriter<4> key(key_index);
uint8_t* data = AddTiny(key.length() + emit.prefix_length());
@@ -317,8 +304,6 @@ void HPackCompressor::Framer::EmitLitHdrWithBinaryStringKeyNotIdx(
void HPackCompressor::Framer::EmitLitHdrWithNonBinaryStringKeyNotIdx(
Slice key_slice, Slice value_slice) {
GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V();
GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
StringKey key(std::move(key_slice));
key.WritePrefix(0x00, AddTiny(key.prefix_length()));
Add(key.key());

@@ -46,7 +46,6 @@
#include "src/core/ext/transport/chttp2/transport/frame_rst_stream.h"
#include "src/core/ext/transport/chttp2/transport/hpack_constants.h"
#include "src/core/ext/transport/chttp2/transport/internal.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/combiner.h"
@@ -766,7 +765,6 @@ class HPackParser::String {
// decoded byte.
template <typename Out>
static bool ParseHuff(Input* input, uint32_t length, Out output) {
GRPC_STATS_INC_HPACK_RECV_HUFFMAN();
int16_t state = 0;
// Parse one half byte... we leverage some lookup tables to keep the logic
// here really simple.
@@ -799,7 +797,6 @@ class HPackParser::String {
// Parse some uncompressed string bytes.
static absl::optional<String> ParseUncompressed(Input* input,
uint32_t length) {
GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED();
// Check there's enough bytes
if (input->remaining() < length) {
return input->UnexpectedEOF(absl::optional<String>());
@@ -1161,7 +1158,6 @@ class HPackParser::Parser {
if (GPR_UNLIKELY(elem == nullptr)) {
return InvalidHPackIndexError(*index, false);
}
GRPC_STATS_INC_HPACK_RECV_INDEXED();
return FinishHeaderOmitFromTable(*elem);
}

@@ -269,16 +269,6 @@ class WriteContext {
GPR_TIMER_SCOPE("grpc_chttp2_begin_write", 0);
}
// TODO(ctiller): make this the destructor
void FlushStats() {
GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
initial_metadata_writes_);
GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(message_writes_);
GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
trailing_metadata_writes_);
GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(flow_control_writes_);
}
void FlushSettings() {
if (t_->dirtied_local_settings && !t_->sent_local_settings) {
grpc_slice_buffer_add(
@@ -515,9 +505,11 @@ class StreamWriteContext {
if (!data_send_context.AnyOutgoing()) {
if (t_->flow_control.remote_window() <= 0) {
GRPC_STATS_INC_HTTP2_TRANSPORT_STALLS();
report_stall(t_, s_, "transport");
grpc_chttp2_list_add_stalled_by_transport(t_, s_);
} else if (data_send_context.stream_remote_window() <= 0) {
GRPC_STATS_INC_HTTP2_STREAM_STALLS();
report_stall(t_, s_, "stream");
grpc_chttp2_list_add_stalled_by_stream(t_, s_);
}

@@ -26,6 +26,7 @@
#include <algorithm>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
@@ -142,28 +143,32 @@ double grpc_stats_histo_percentile(const grpc_stats_data* stats,
static_cast<double>(count) * percentile / 100.0);
}
namespace {
template <typename I>
std::string ArrayToJson(const I* values, size_t count) {
std::vector<std::string> parts;
for (size_t i = 0; i < count; i++) {
parts.push_back(absl::StrFormat("%d", values[i]));
}
return absl::StrCat("[", absl::StrJoin(parts, ","), "]");
}
} // namespace
std::string grpc_stats_data_as_json(const grpc_stats_data* data) {
std::vector<std::string> parts;
parts.push_back("{");
for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
parts.push_back(absl::StrFormat(
"\"%s\": %" PRIdPTR, grpc_stats_counter_name[i], data->counters[i]));
}
for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
parts.push_back(absl::StrFormat("\"%s\": [", grpc_stats_histogram_name[i]));
for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
parts.push_back(
absl::StrFormat("%s%" PRIdPTR, j == 0 ? "" : ",",
data->histograms[grpc_stats_histo_start[i] + j]));
}
parts.push_back(
absl::StrFormat("], \"%s_bkt\": [", grpc_stats_histogram_name[i]));
for (int j = 0; j < grpc_stats_histo_buckets[i]; j++) {
parts.push_back(absl::StrFormat(
"%s%d", j == 0 ? "" : ",", grpc_stats_histo_bucket_boundaries[i][j]));
}
parts.push_back("]");
"\"%s\": %s", grpc_stats_histogram_name[i],
ArrayToJson(data->histograms + grpc_stats_histo_start[i],
grpc_stats_histo_buckets[i])));
parts.push_back(
absl::StrFormat("\"%s_bkt\": %s", grpc_stats_histogram_name[i],
ArrayToJson(grpc_stats_histo_bucket_boundaries[i],
grpc_stats_histo_buckets[i])));
}
parts.push_back("}");
return absl::StrJoin(parts, "");
return absl::StrCat("{", absl::StrJoin(parts, ", "), "}");
}
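A minimal usage sketch of the serializer above (mine, not part of this diff): snapshot the process-wide stats and render them. Counters appear as "name": value pairs; each histogram appears twice, as "name" (per-bucket counts) and "name_bkt" (bucket boundaries), both emitted through ArrayToJson.
grpc_stats_data snapshot;
grpc_stats_collect(&snapshot);  // aggregates the per-CPU shards
std::string json = grpc_stats_data_as_json(&snapshot);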

@@ -47,9 +47,6 @@ extern Stats* const g_stats_data;
(&::grpc_core::g_stats_data \
->per_cpu[grpc_core::ExecCtx::Get()->starting_cpu()])
/* Only collect stats if GRPC_COLLECT_STATS is defined or it is a debug build.
*/
#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)
#define GRPC_STATS_INC_COUNTER(ctr) \
(gpr_atm_no_barrier_fetch_add(&GRPC_THREAD_STATS_DATA()->counters[(ctr)], 1))
@@ -57,13 +54,7 @@ extern Stats* const g_stats_data;
(gpr_atm_no_barrier_fetch_add( \
&GRPC_THREAD_STATS_DATA()->histograms[histogram##_FIRST_SLOT + (index)], \
1))
#else /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */
#define GRPC_STATS_INC_COUNTER(ctr)
#define GRPC_STATS_INC_HISTOGRAM(histogram, index)
#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */
GRPC_DEPRECATED("function is no longer needed")
inline void grpc_stats_init(void) {}
void grpc_stats_collect(grpc_stats_data* output);
// c = b-a
void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,

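To make the header change concrete: with the GRPC_COLLECT_STATS / NDEBUG guard deleted, the macros always expand to an atomic bump of a per-CPU slot; the no-op fallback variants no longer exist. A rough sketch of what one call site now does (counter chosen for illustration):
GRPC_STATS_INC_SYSCALL_WRITE();
// ...expands, per the macros above, to approximately:
//   gpr_atm_no_barrier_fetch_add(
//       &grpc_core::g_stats_data
//            ->per_cpu[grpc_core::ExecCtx::Get()->starting_cpu()]
//            .counters[GRPC_STATS_COUNTER_SYSCALL_WRITE],
//       1);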
@@ -28,259 +28,47 @@
#include "src/core/lib/gpr/useful.h"
const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"client_calls_created",
"server_calls_created",
"cqs_created",
"client_channels_created",
"client_subchannels_created",
"server_channels_created",
"syscall_poll",
"syscall_wait",
"pollset_kick",
"pollset_kicked_without_poller",
"pollset_kicked_again",
"pollset_kick_wakeup_fd",
"pollset_kick_wakeup_cv",
"pollset_kick_own_thread",
"histogram_slow_lookups",
"syscall_write",
"syscall_read",
"tcp_backup_pollers_created",
"tcp_backup_poller_polls",
"http2_op_batches",
"http2_op_cancel",
"http2_op_send_initial_metadata",
"http2_op_send_message",
"http2_op_send_trailing_metadata",
"http2_op_recv_initial_metadata",
"http2_op_recv_message",
"http2_op_recv_trailing_metadata",
"http2_settings_writes",
"http2_pings_sent",
"http2_writes_begun",
"http2_writes_offloaded",
"http2_writes_continued",
"http2_partial_writes",
"http2_initiate_write_due_to_initial_write",
"http2_initiate_write_due_to_start_new_stream",
"http2_initiate_write_due_to_send_message",
"http2_initiate_write_due_to_send_initial_metadata",
"http2_initiate_write_due_to_send_trailing_metadata",
"http2_initiate_write_due_to_retry_send_ping",
"http2_initiate_write_due_to_continue_pings",
"http2_initiate_write_due_to_goaway_sent",
"http2_initiate_write_due_to_rst_stream",
"http2_initiate_write_due_to_close_from_api",
"http2_initiate_write_due_to_stream_flow_control",
"http2_initiate_write_due_to_transport_flow_control",
"http2_initiate_write_due_to_send_settings",
"http2_initiate_write_due_to_settings_ack",
"http2_initiate_write_due_to_bdp_estimator_ping",
"http2_initiate_write_due_to_flow_control_unstalled_by_setting",
"http2_initiate_write_due_to_flow_control_unstalled_by_update",
"http2_initiate_write_due_to_application_ping",
"http2_initiate_write_due_to_keepalive_ping",
"http2_initiate_write_due_to_transport_flow_control_unstalled",
"http2_initiate_write_due_to_ping_response",
"http2_initiate_write_due_to_force_rst_stream",
"http2_spurious_writes_begun",
"hpack_recv_indexed",
"hpack_recv_lithdr_incidx",
"hpack_recv_lithdr_incidx_v",
"hpack_recv_lithdr_notidx",
"hpack_recv_lithdr_notidx_v",
"hpack_recv_lithdr_nvridx",
"hpack_recv_lithdr_nvridx_v",
"hpack_recv_uncompressed",
"hpack_recv_huffman",
"hpack_recv_binary",
"hpack_recv_binary_base64",
"hpack_send_indexed",
"hpack_send_lithdr_incidx",
"hpack_send_lithdr_incidx_v",
"hpack_send_lithdr_notidx",
"hpack_send_lithdr_notidx_v",
"hpack_send_lithdr_nvridx",
"hpack_send_lithdr_nvridx_v",
"hpack_send_uncompressed",
"hpack_send_huffman",
"hpack_send_binary",
"hpack_send_binary_base64",
"combiner_locks_initiated",
"combiner_locks_scheduled_items",
"combiner_locks_scheduled_final_items",
"combiner_locks_offloaded",
"call_combiner_locks_initiated",
"call_combiner_locks_scheduled_items",
"call_combiner_set_notify_on_cancel",
"call_combiner_cancelled",
"executor_scheduled_short_items",
"executor_scheduled_long_items",
"executor_scheduled_to_self",
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
"server_requested_calls",
"server_slowpath_requests_queued",
"cq_ev_queue_trylock_failures",
"cq_ev_queue_trylock_successes",
"cq_ev_queue_transient_pop_failures",
"client_calls_created", "server_calls_created",
"client_channels_created", "client_subchannels_created",
"server_channels_created", "histogram_slow_lookups",
"syscall_write", "syscall_read",
"http2_settings_writes", "http2_pings_sent",
"http2_writes_begun", "http2_transport_stalls",
"http2_stream_stalls",
};
const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
"Number of completion queues created",
"Number of client channels created",
"Number of client subchannels created",
"Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"How many polling wakeups were performed by the process (only valid for "
"epoll1 right now)",
"How many times was a polling wakeup requested without an active poller "
"(only valid for epoll1 right now)",
"How many times was the same polling worker awoken repeatedly before "
"waking up (only valid for epoll1 right now)",
"How many times was an eventfd used as the wakeup vector for a polling "
"wakeup (only valid for epoll1 right now)",
"How many times was a condition variable used as the wakeup vector for a "
"polling wakeup (only valid for epoll1 right now)",
"How many times could a polling wakeup be satisfied by keeping the waking "
"thread awake? (only valid for epoll1 right now)",
"Number of times histogram increments went through the slow (binary "
"search) path",
"Number of write syscalls (or equivalent - eg sendmsg) made by this "
"process",
"Number of read syscalls (or equivalent - eg recvmsg) made by this process",
"Number of times a backup poller has been created (this can be expensive)",
"Number of polls performed on the backup poller",
"Number of batches received by HTTP2 transport",
"Number of cancelations received by HTTP2 transport",
"Number of batches containing send initial metadata",
"Number of batches containing send message",
"Number of batches containing send trailing metadata",
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
"Number of settings frames sent",
"Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "
"written",
"Number of HTTP2 writes that were made knowing there was still more data "
"to be written (we cap maximum write size to syscall_write)",
"Number of HTTP2 writes initiated due to 'initial_write'",
"Number of HTTP2 writes initiated due to 'start_new_stream'",
"Number of HTTP2 writes initiated due to 'send_message'",
"Number of HTTP2 writes initiated due to 'send_initial_metadata'",
"Number of HTTP2 writes initiated due to 'send_trailing_metadata'",
"Number of HTTP2 writes initiated due to 'retry_send_ping'",
"Number of HTTP2 writes initiated due to 'continue_pings'",
"Number of HTTP2 writes initiated due to 'goaway_sent'",
"Number of HTTP2 writes initiated due to 'rst_stream'",
"Number of HTTP2 writes initiated due to 'close_from_api'",
"Number of HTTP2 writes initiated due to 'stream_flow_control'",
"Number of HTTP2 writes initiated due to 'transport_flow_control'",
"Number of HTTP2 writes initiated due to 'send_settings'",
"Number of HTTP2 writes initiated due to 'settings_ack'",
"Number of HTTP2 writes initiated due to 'bdp_estimator_ping'",
"Number of HTTP2 writes initiated due to "
"'flow_control_unstalled_by_setting'",
"Number of HTTP2 writes initiated due to "
"'flow_control_unstalled_by_update'",
"Number of HTTP2 writes initiated due to 'application_ping'",
"Number of HTTP2 writes initiated due to 'keepalive_ping'",
"Number of HTTP2 writes initiated due to "
"'transport_flow_control_unstalled'",
"Number of HTTP2 writes initiated due to 'ping_response'",
"Number of HTTP2 writes initiated due to 'force_rst_stream'",
"Number of HTTP2 writes initiated with nothing to write",
"Number of HPACK indexed fields received",
"Number of HPACK literal headers received with incremental indexing",
"Number of HPACK literal headers received with incremental indexing and "
"literal keys",
"Number of HPACK literal headers received with no indexing",
"Number of HPACK literal headers received with no indexing and literal "
"keys",
"Number of HPACK literal headers received with never-indexing",
"Number of HPACK literal headers received with never-indexing and literal "
"keys",
"Number of uncompressed strings received in metadata",
"Number of huffman encoded strings received in metadata",
"Number of binary strings received in metadata",
"Number of binary strings received encoded in base64 in metadata",
"Number of HPACK indexed fields sent",
"Number of HPACK literal headers sent with incremental indexing",
"Number of HPACK literal headers sent with incremental indexing and "
"literal keys",
"Number of HPACK literal headers sent with no indexing",
"Number of HPACK literal headers sent with no indexing and literal keys",
"Number of HPACK literal headers sent with never-indexing",
"Number of HPACK literal headers sent with never-indexing and literal keys",
"Number of uncompressed strings sent in metadata",
"Number of huffman encoded strings sent in metadata",
"Number of binary strings received in metadata",
"Number of binary strings received encoded in base64 in metadata",
"Number of combiner lock entries by process (first items queued to a "
"combiner)",
"Number of items scheduled against combiner locks",
"Number of final items scheduled against combiner locks",
"Number of combiner locks offloaded to different threads",
"Number of call combiner lock entries by process (first items queued to a "
"call combiner)",
"Number of items scheduled against call combiner locks",
"Number of times a cancellation callback was set on a call combiner",
"Number of times a call combiner was cancelled",
"Number of finite runtime closures scheduled against the executor (gRPC "
"thread pool)",
"Number of potentially infinite runtime closures scheduled against the "
"executor (gRPC thread pool)",
"Number of closures scheduled by the executor to the executor",
"Number of thread wakeups initiated within the executor",
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
"How many calls were requested (not necessarily received) by the server",
"How many times was the server slow path taken (indicates too few "
"outstanding requests)",
"Number of lock (trylock) acquisition failures on completion queue event "
"queue. High value here indicates high contention on completion queues",
"Number of lock (trylock) acquisition successes on completion queue event "
"queue.",
"Number of times NULL was popped out of completion queue's event queue "
"even though the event queue was not empty",
"Number of times sending was completely stalled by the transport flow "
"control window",
"Number of times sending was completely stalled by the stream flow control "
"window",
};
const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"call_initial_size",
"poll_events_returned",
"tcp_write_size",
"tcp_write_iov_size",
"tcp_read_size",
"tcp_read_offer",
"tcp_read_offer_iov_size",
"http2_send_message_size",
"http2_send_initial_metadata_per_write",
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
"server_cqs_checked",
"call_initial_size", "tcp_write_size", "tcp_write_iov_size",
"tcp_read_allocation", "tcp_read_size", "tcp_read_offer",
"tcp_read_offer_iov_size", "http2_send_message_size",
};
const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Initial size of the grpc_call arena created at call start",
"How many events are called for each syscall_poll",
"Number of bytes offered to each syscall_write",
"Number of byte segments offered to each syscall_write",
"Number of bytes allocated in each slice by tcp reads",
"Number of bytes received by each syscall_read",
"Number of bytes offered to each syscall_read",
"Number of byte segments offered to each syscall_read",
"Size of messages received by HTTP2 transport",
"Number of streams initiated written per TCP write",
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
"How many completion queues were checked looking for a CQ that had "
"requested the incoming call",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 5, 7, 9, 11, 14,
@@ -298,27 +86,7 @@ const uint8_t grpc_stats_table_1[124] = {
33, 34, 34, 34, 35, 35, 36, 37, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50,
51, 51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58};
const int grpc_stats_table_2[129] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60,
63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 98, 102, 106, 110,
114, 118, 122, 126, 131, 136, 141, 146, 151, 156, 162, 168, 174, 180, 186,
192, 199, 206, 213, 220, 228, 236, 244, 252, 260, 269, 278, 287, 297, 307,
317, 327, 338, 349, 360, 372, 384, 396, 409, 422, 436, 450, 464, 479, 494,
510, 526, 543, 560, 578, 596, 615, 634, 654, 674, 695, 717, 739, 762, 785,
809, 834, 859, 885, 912, 939, 967, 996, 1024};
const uint8_t grpc_stats_table_3[166] = {
0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16,
17, 17, 18, 19, 19, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 27, 28,
28, 29, 29, 30, 30, 31, 31, 32, 32, 33, 33, 34, 34, 35, 36, 36, 37, 38, 39,
40, 40, 41, 42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51,
51, 52, 52, 53, 53, 54, 54, 55, 56, 57, 58, 59, 59, 60, 61, 62, 63, 63, 64,
65, 65, 66, 67, 67, 68, 69, 69, 70, 71, 71, 72, 72, 73, 73, 74, 75, 75, 76,
76, 77, 78, 79, 79, 80, 81, 82, 83, 84, 85, 85, 86, 87, 88, 88, 89, 90, 90,
91, 92, 92, 93, 94, 94, 95, 95, 96, 97, 97, 98, 98, 99};
const int grpc_stats_table_4[65] = {
const int grpc_stats_table_2[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
15, 20, 26, 34, 44, 57, 73, 94,
121, 155, 199, 255, 327, 419, 537, 688,
@@ -328,27 +96,25 @@ const int grpc_stats_table_4[65] = {
326126, 417200, 533707, 682750, 873414, 1117323, 1429345, 1828502,
2339127, 2992348, 3827987, 4896985, 6264509, 8013925, 10251880, 13114801,
16777216};
const uint8_t grpc_stats_table_5[87] = {
const uint8_t grpc_stats_table_3[87] = {
0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11,
11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 20, 20, 21, 22, 22, 23,
24, 25, 25, 26, 27, 27, 28, 29, 29, 30, 31, 31, 32, 33, 34, 34, 35, 36,
36, 37, 38, 39, 39, 40, 41, 41, 42, 43, 44, 44, 45, 45, 46, 47, 48, 48,
49, 50, 51, 51, 52, 53, 53, 54, 55, 56, 56, 57, 58, 58, 59};
const int grpc_stats_table_6[65] = {
const int grpc_stats_table_4[65] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
14, 16, 18, 20, 22, 24, 27, 30, 33, 36, 39, 43, 47,
51, 56, 61, 66, 72, 78, 85, 92, 100, 109, 118, 128, 139,
151, 164, 178, 193, 209, 226, 244, 264, 285, 308, 333, 359, 387,
418, 451, 486, 524, 565, 609, 656, 707, 762, 821, 884, 952, 1024};
const uint8_t grpc_stats_table_7[102] = {
const uint8_t grpc_stats_table_5[102] = {
0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
6, 7, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, 12, 13, 13, 14, 14,
14, 15, 15, 16, 16, 17, 17, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23,
23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
void grpc_stats_inc_call_initial_size(int value) {
value = grpc_core::Clamp(value, 0, 262144);
if (value < 6) {
@@ -372,10 +138,10 @@ void grpc_stats_inc_call_initial_size(int value) {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_0, 64));
}
void grpc_stats_inc_poll_events_returned(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 29) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
void grpc_stats_inc_tcp_write_size(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, value);
return;
}
union {
@@ -383,22 +149,22 @@ void grpc_stats_inc_poll_events_returned(int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4642789003353915392ull) {
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
grpc_stats_table_3[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 128));
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 64));
}
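The function above is the generated fast path for histogram increments, and the trick is easier to see in isolation: for non-negative values, the IEEE-754 bit pattern of (double)value increases monotonically with the value, so the exponent plus top mantissa bits can index a small precomputed table of candidate buckets, and a single compare against the bucket's lower bound fixes the possible off-by-one. A self-contained sketch of the same idea, with illustrative (not generated) names and parameters:
#include <cstdint>
#include <cstring>

// boundaries: ascending bucket lower bounds; probe: maps shifted bit patterns
// to candidate buckets; first_bits/shift are baked in at codegen time for the
// first value the probe table covers. Assumes value is already clamped.
int BucketFor(int value, const int* boundaries, const uint8_t* probe,
              uint64_t first_bits, int shift, int linear_buckets) {
  if (value < linear_buckets) return value;  // small values map one-to-one
  double d = value;
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // monotone in value for value >= 0
  int bucket = probe[(bits - first_bits) >> shift] + linear_buckets;
  double lower = boundaries[bucket];
  uint64_t lower_bits;
  std::memcpy(&lower_bits, &lower, sizeof(lower_bits));
  bucket -= (bits < lower_bits);  // the probe may land one bucket too high
  return bucket;
}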
void grpc_stats_inc_tcp_write_size(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, value);
void grpc_stats_inc_tcp_write_iov_size(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
return;
}
union {
@@ -406,22 +172,22 @@ void grpc_stats_inc_tcp_write_size(int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
grpc_stats_table_5[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, bucket);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_tcp_write_iov_size(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
void grpc_stats_inc_tcp_read_allocation(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
if (value < 5) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION, value);
return;
}
union {
@@ -429,17 +195,17 @@ void grpc_stats_inc_tcp_write_iov_size(int value) {
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
grpc_stats_table_3[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_tcp_read_size(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
@@ -454,15 +220,15 @@ void grpc_stats_inc_tcp_read_size(int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
grpc_stats_table_3[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_tcp_read_offer(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
@@ -477,15 +243,15 @@ void grpc_stats_inc_tcp_read_offer(int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
grpc_stats_table_3[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 64));
}
void grpc_stats_inc_tcp_read_offer_iov_size(int value) {
value = grpc_core::Clamp(value, 0, 1024);
@@ -501,8 +267,8 @@ void grpc_stats_inc_tcp_read_offer_iov_size(int value) {
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
grpc_stats_table_5[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_4[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
bucket);
@@ -510,7 +276,7 @@ void grpc_stats_inc_tcp_read_offer_iov_size(int value) {
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_http2_send_message_size(int value) {
value = grpc_core::Clamp(value, 0, 16777216);
@@ -526,8 +292,8 @@ void grpc_stats_inc_http2_send_message_size(int value) {
_val.dbl = value;
if (_val.uint < 4683743612465315840ull) {
int bucket =
grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_4[bucket];
grpc_stats_table_3[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
bucket);
@@ -535,152 +301,20 @@ void grpc_stats_inc_http2_send_message_size(int value) {
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
}
void grpc_stats_inc_http2_send_initial_metadata_per_write(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_message_per_write(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_trailing_metadata_per_write(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_http2_send_flowctl_per_write(int value) {
value = grpc_core::Clamp(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_6[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
}
void grpc_stats_inc_server_cqs_checked(int value) {
value = grpc_core::Clamp(value, 0, 64);
if (value < 3) {
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4625196817309499392ull) {
int bucket =
grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
_bkt.dbl = grpc_stats_table_8[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM(
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_8, 8));
grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 64));
}
const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64, 8};
const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448,
512, 576, 640, 704, 768, 832};
const int* const grpc_stats_histo_bucket_boundaries[13] = {
const int grpc_stats_histo_buckets[8] = {64, 64, 64, 64, 64, 64, 64, 64};
const int grpc_stats_histo_start[8] = {0, 64, 128, 192, 256, 320, 384, 448};
const int* const grpc_stats_histo_bucket_boundaries[8] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4,
grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
grpc_stats_table_8};
void (*const grpc_stats_inc_histogram[13])(int x) = {
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
grpc_stats_table_4, grpc_stats_table_2};
void (*const grpc_stats_inc_histogram[8])(int x) = {
grpc_stats_inc_call_initial_size,
grpc_stats_inc_poll_events_returned,
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_allocation,
grpc_stats_inc_tcp_read_size,
grpc_stats_inc_tcp_read_offer,
grpc_stats_inc_tcp_read_offer_iov_size,
grpc_stats_inc_http2_send_message_size,
grpc_stats_inc_http2_send_initial_metadata_per_write,
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write,
grpc_stats_inc_server_cqs_checked};
grpc_stats_inc_http2_send_message_size};
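These parallel tables keep bucket counts, boundaries, and increment functions addressable by the same histogram index, so generic code (the JSON dump above, the stats test) can work with any histogram without naming it. A hedged usage sketch (value chosen for illustration):
// Record a 4096-byte read via the dispatch table...
grpc_stats_inc_histogram[GRPC_STATS_HISTOGRAM_TCP_READ_SIZE](4096);
// ...and read back that histogram's bucket boundaries from the same index.
const int* bounds =
    grpc_stats_histo_bucket_boundaries[GRPC_STATS_HISTOGRAM_TCP_READ_SIZE];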

@@ -26,119 +26,30 @@
typedef enum {
GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
GRPC_STATS_COUNTER_CQS_CREATED,
GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED,
GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED,
GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED,
GRPC_STATS_COUNTER_SYSCALL_POLL,
GRPC_STATS_COUNTER_SYSCALL_WAIT,
GRPC_STATS_COUNTER_POLLSET_KICK,
GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER,
GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD,
GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV,
GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD,
GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS,
GRPC_STATS_COUNTER_SYSCALL_WRITE,
GRPC_STATS_COUNTER_SYSCALL_READ,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED,
GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS,
GRPC_STATS_COUNTER_HTTP2_OP_BATCHES,
GRPC_STATS_COUNTER_HTTP2_OP_CANCEL,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES,
GRPC_STATS_COUNTER_HTTP2_PINGS_SENT,
GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN,
GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED,
GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED,
GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SETTINGS_ACK,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE,
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM,
GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN,
GRPC_STATS_COUNTER_HPACK_RECV_INDEXED,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX,
GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V,
GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED,
GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN,
GRPC_STATS_COUNTER_HPACK_RECV_BINARY,
GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64,
GRPC_STATS_COUNTER_HPACK_SEND_INDEXED,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX,
GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V,
GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED,
GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN,
GRPC_STATS_COUNTER_HPACK_SEND_BINARY,
GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64,
GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS,
GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED,
GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED,
GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS,
GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL,
GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS,
GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF,
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES,
GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES,
GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES,
GRPC_STATS_COUNTER_HTTP2_TRANSPORT_STALLS,
GRPC_STATS_COUNTER_HTTP2_STREAM_STALLS,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char* grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
extern const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char* grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@@ -146,262 +57,60 @@ extern const char* grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT];
typedef enum {
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_FIRST_SLOT = 0,
GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_FIRST_SLOT = 64,
GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED_BUCKETS = 128,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 192,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_FIRST_SLOT = 64,
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 256,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_FIRST_SLOT = 128,
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION_FIRST_SLOT = 192,
GRPC_STATS_HISTOGRAM_TCP_READ_ALLOCATION_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_FIRST_SLOT = 256,
GRPC_STATS_HISTOGRAM_TCP_READ_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_FIRST_SLOT = 320,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_FIRST_SLOT = 384,
GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 512,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_FIRST_SLOT = 448,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_FIRST_SLOT = 640,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_FIRST_SLOT = 704,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832,
GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
GRPC_STATS_HISTOGRAM_BUCKETS = 840
GRPC_STATS_HISTOGRAM_BUCKETS = 512
} grpc_stats_histogram_constants;
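The new layout packs the eight histograms into 64 contiguous slots each: FIRST_SLOT steps by 64 from 0 to 448, and 8 × 64 = 512, matching GRPC_STATS_HISTOGRAM_BUCKETS.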
#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
#define GRPC_STATS_INC_SERVER_CALLS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
#define GRPC_STATS_INC_CQS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQS_CREATED)
#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
#define GRPC_STATS_INC_SYSCALL_POLL() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_POLL)
#define GRPC_STATS_INC_SYSCALL_WAIT() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WAIT)
#define GRPC_STATS_INC_POLLSET_KICK() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK)
#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
#define GRPC_STATS_INC_SYSCALL_WRITE() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WRITE)
#define GRPC_STATS_INC_SYSCALL_READ() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_READ)
#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
#define GRPC_STATS_INC_HTTP2_OP_BATCHES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
#define GRPC_STATS_INC_HTTP2_OP_CANCEL() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
#define GRPC_STATS_INC_HTTP2_PINGS_SENT() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SETTINGS_ACK() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SETTINGS_ACK)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
#define GRPC_STATS_INC_HPACK_RECV_INDEXED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
#define GRPC_STATS_INC_HPACK_RECV_BINARY() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
#define GRPC_STATS_INC_HPACK_SEND_INDEXED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
#define GRPC_STATS_INC_HPACK_SEND_BINARY() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS() \
GRPC_STATS_INC_COUNTER( \
GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED)
#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS)
#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL)
#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES)
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES)
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES)
#define GRPC_STATS_INC_HTTP2_TRANSPORT_STALLS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_TRANSPORT_STALLS)
#define GRPC_STATS_INC_HTTP2_STREAM_STALLS() \
GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_STREAM_STALLS)
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value) \
grpc_stats_inc_call_initial_size((int)(value))
void grpc_stats_inc_call_initial_size(int x);
#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(value) \
grpc_stats_inc_poll_events_returned((int)(value))
void grpc_stats_inc_poll_events_returned(int x);
#define GRPC_STATS_INC_TCP_WRITE_SIZE(value) \
grpc_stats_inc_tcp_write_size((int)(value))
void grpc_stats_inc_tcp_write_size(int x);
#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value) \
grpc_stats_inc_tcp_write_iov_size((int)(value))
void grpc_stats_inc_tcp_write_iov_size(int x);
#define GRPC_STATS_INC_TCP_READ_ALLOCATION(value) \
grpc_stats_inc_tcp_read_allocation((int)(value))
void grpc_stats_inc_tcp_read_allocation(int x);
#define GRPC_STATS_INC_TCP_READ_SIZE(value) \
grpc_stats_inc_tcp_read_size((int)(value))
void grpc_stats_inc_tcp_read_size(int x);
@ -414,136 +123,9 @@ void grpc_stats_inc_tcp_read_offer_iov_size(int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value) \
grpc_stats_inc_http2_send_message_size((int)(value))
void grpc_stats_inc_http2_send_message_size(int x);
#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(value) \
grpc_stats_inc_http2_send_initial_metadata_per_write((int)(value))
void grpc_stats_inc_http2_send_initial_metadata_per_write(int x);
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(value) \
grpc_stats_inc_http2_send_message_per_write((int)(value))
void grpc_stats_inc_http2_send_message_per_write(int x);
#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(value) \
grpc_stats_inc_http2_send_trailing_metadata_per_write((int)(value))
void grpc_stats_inc_http2_send_trailing_metadata_per_write(int x);
#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(value) \
grpc_stats_inc_http2_send_flowctl_per_write((int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(int x);
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(value) \
grpc_stats_inc_server_cqs_checked((int)(value))
void grpc_stats_inc_server_cqs_checked(int x);
#else
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED()
#define GRPC_STATS_INC_SERVER_CALLS_CREATED()
#define GRPC_STATS_INC_CQS_CREATED()
#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED()
#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED()
#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED()
#define GRPC_STATS_INC_SYSCALL_POLL()
#define GRPC_STATS_INC_SYSCALL_WAIT()
#define GRPC_STATS_INC_POLLSET_KICK()
#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER()
#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN()
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD()
#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV()
#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD()
#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS()
#define GRPC_STATS_INC_SYSCALL_WRITE()
#define GRPC_STATS_INC_SYSCALL_READ()
#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED()
#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS()
#define GRPC_STATS_INC_HTTP2_OP_BATCHES()
#define GRPC_STATS_INC_HTTP2_OP_CANCEL()
#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA()
#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE()
#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA()
#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA()
#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE()
#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA()
#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES()
#define GRPC_STATS_INC_HTTP2_PINGS_SENT()
#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN()
#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED()
#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED()
#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SETTINGS_ACK()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE()
#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM()
#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN()
#define GRPC_STATS_INC_HPACK_RECV_INDEXED()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX()
#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V()
#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED()
#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN()
#define GRPC_STATS_INC_HPACK_RECV_BINARY()
#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64()
#define GRPC_STATS_INC_HPACK_SEND_INDEXED()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX()
#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V()
#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED()
#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN()
#define GRPC_STATS_INC_HPACK_SEND_BINARY()
#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64()
#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED()
#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS()
#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS()
#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED()
#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED()
#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS()
#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL()
#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED()
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS()
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS()
#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF()
#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED()
#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED()
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES()
#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS()
#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED()
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES()
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES()
#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES()
#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value)
#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(value)
#define GRPC_STATS_INC_TCP_WRITE_SIZE(value)
#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value)
#define GRPC_STATS_INC_TCP_READ_SIZE(value)
#define GRPC_STATS_INC_TCP_READ_OFFER(value)
#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(value)
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value)
#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(value)
#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(value)
#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(value)
#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(value)
#define GRPC_STATS_INC_SERVER_CQS_CHECKED(value)
#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */
extern const int grpc_stats_histo_buckets[13];
extern const int grpc_stats_histo_start[13];
extern const int* const grpc_stats_histo_bucket_boundaries[13];
extern void (*const grpc_stats_inc_histogram[13])(int x);
extern const int grpc_stats_histo_buckets[8];
extern const int grpc_stats_histo_start[8];
extern const int* const grpc_stats_histo_bucket_boundaries[8];
extern void (*const grpc_stats_inc_histogram[8])(int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
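The block removed above was the disabled half of a compile-time toggle: every stat macro had an increment form and an empty form. A hedged sketch of that toggle pattern, with illustrative names rather than the real gRPC identifiers:

// Hedged sketch of the compile-time stats toggle being removed here
// (MYLIB_* names are illustrative, not gRPC's):
#if defined(MYLIB_COLLECT_STATS) || !defined(NDEBUG)
#define MYLIB_INC_FOO() mylib_inc_counter(kCounterFoo)
void mylib_inc_counter(int which);  // bumps one slot in a counter table
#else
#define MYLIB_INC_FOO()  // stats disabled: call sites compile to nothing
#endif

Call sites stay unconditional either way; after this commit the no-op half is gone and the remaining counters are always collected.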

@ -24,45 +24,12 @@
max: 262144
buckets: 64
doc: Initial size of the grpc_call arena created at call start
- counter: cqs_created
doc: Number of completion queues created
- counter: client_channels_created
doc: Number of client channels created
- counter: client_subchannels_created
doc: Number of client subchannels created
- counter: server_channels_created
doc: Number of server channels created
# polling
- counter: syscall_poll
doc: Number of polling syscalls (epoll_wait, poll, etc) made by this process
- counter: syscall_wait
doc: Number of sleeping syscalls made by this process
- histogram: poll_events_returned
max: 1024
buckets: 128
doc: How many events are called for each syscall_poll
- counter: pollset_kick
doc: How many polling wakeups were performed by the process
(only valid for epoll1 right now)
- counter: pollset_kicked_without_poller
doc: How many times was a polling wakeup requested without an active poller
(only valid for epoll1 right now)
- counter: pollset_kicked_again
  doc: How many times was the same polling worker kicked again before it
       actually woke up
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_fd
doc: How many times was an eventfd used as the wakeup vector for a polling
wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_wakeup_cv
doc: How many times was a condition variable used as the wakeup vector for a
polling wakeup
(only valid for epoll1 right now)
- counter: pollset_kick_own_thread
doc: How many times could a polling wakeup be satisfied by keeping the waking
thread awake?
(only valid for epoll1 right now)
# stats system
- counter: histogram_slow_lookups
doc: Number of times histogram increments went through the slow
@ -80,6 +47,10 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_write
- histogram: tcp_read_allocation
max: 16777216
buckets: 64
doc: Number of bytes allocated in each slice by tcp reads
- histogram: tcp_read_size
max: 16777216
buckets: 64
@ -92,205 +63,18 @@
max: 1024
buckets: 64
doc: Number of byte segments offered to each syscall_read
- counter: tcp_backup_pollers_created
doc: Number of times a backup poller has been created (this can be expensive)
- counter: tcp_backup_poller_polls
doc: Number of polls performed on the backup poller
# chttp2
- counter: http2_op_batches
doc: Number of batches received by HTTP2 transport
- counter: http2_op_cancel
  doc: Number of cancellations received by HTTP2 transport
- counter: http2_op_send_initial_metadata
doc: Number of batches containing send initial metadata
- counter: http2_op_send_message
doc: Number of batches containing send message
- counter: http2_op_send_trailing_metadata
doc: Number of batches containing send trailing metadata
- counter: http2_op_recv_initial_metadata
doc: Number of batches containing receive initial metadata
- counter: http2_op_recv_message
doc: Number of batches containing receive message
- counter: http2_op_recv_trailing_metadata
doc: Number of batches containing receive trailing metadata
- histogram: http2_send_message_size
max: 16777216
buckets: 64
  doc: Size of messages sent by HTTP2 transport
- histogram: http2_send_initial_metadata_per_write
max: 1024
buckets: 64
  doc: Number of streams whose initial metadata was written per TCP write
- histogram: http2_send_message_per_write
max: 1024
buckets: 64
doc: Number of streams whose payload was written per TCP write
- histogram: http2_send_trailing_metadata_per_write
max: 1024
buckets: 64
doc: Number of streams terminated per TCP write
- histogram: http2_send_flowctl_per_write
max: 1024
buckets: 64
doc: Number of flow control updates written per TCP write
- counter: http2_settings_writes
doc: Number of settings frames sent
- counter: http2_pings_sent
doc: Number of HTTP2 pings sent by process
- counter: http2_writes_begun
doc: Number of HTTP2 writes initiated
- counter: http2_writes_offloaded
doc: Number of HTTP2 writes offloaded to the executor from application threads
- counter: http2_writes_continued
  doc: Number of HTTP2 writes that finished and found more data still needing
       to be written
- counter: http2_partial_writes
  doc: Number of HTTP2 writes that were made knowing there was still more data
       to be written (we cap the maximum write size passed to each syscall_write)
- counter: http2_initiate_write_due_to_initial_write
doc: Number of HTTP2 writes initiated due to 'initial_write'
- counter: http2_initiate_write_due_to_start_new_stream
doc: Number of HTTP2 writes initiated due to 'start_new_stream'
- counter: http2_initiate_write_due_to_send_message
doc: Number of HTTP2 writes initiated due to 'send_message'
- counter: http2_initiate_write_due_to_send_initial_metadata
doc: Number of HTTP2 writes initiated due to 'send_initial_metadata'
- counter: http2_initiate_write_due_to_send_trailing_metadata
doc: Number of HTTP2 writes initiated due to 'send_trailing_metadata'
- counter: http2_initiate_write_due_to_retry_send_ping
doc: Number of HTTP2 writes initiated due to 'retry_send_ping'
- counter: http2_initiate_write_due_to_continue_pings
doc: Number of HTTP2 writes initiated due to 'continue_pings'
- counter: http2_initiate_write_due_to_goaway_sent
doc: Number of HTTP2 writes initiated due to 'goaway_sent'
- counter: http2_initiate_write_due_to_rst_stream
doc: Number of HTTP2 writes initiated due to 'rst_stream'
- counter: http2_initiate_write_due_to_close_from_api
doc: Number of HTTP2 writes initiated due to 'close_from_api'
- counter: http2_initiate_write_due_to_stream_flow_control
doc: Number of HTTP2 writes initiated due to 'stream_flow_control'
- counter: http2_initiate_write_due_to_transport_flow_control
doc: Number of HTTP2 writes initiated due to 'transport_flow_control'
- counter: http2_initiate_write_due_to_send_settings
doc: Number of HTTP2 writes initiated due to 'send_settings'
- counter: http2_initiate_write_due_to_settings_ack
doc: Number of HTTP2 writes initiated due to 'settings_ack'
- counter: http2_initiate_write_due_to_bdp_estimator_ping
doc: Number of HTTP2 writes initiated due to 'bdp_estimator_ping'
- counter: http2_initiate_write_due_to_flow_control_unstalled_by_setting
doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_setting'
- counter: http2_initiate_write_due_to_flow_control_unstalled_by_update
doc: Number of HTTP2 writes initiated due to 'flow_control_unstalled_by_update'
- counter: http2_initiate_write_due_to_application_ping
doc: Number of HTTP2 writes initiated due to 'application_ping'
- counter: http2_initiate_write_due_to_keepalive_ping
doc: Number of HTTP2 writes initiated due to 'keepalive_ping'
- counter: http2_initiate_write_due_to_transport_flow_control_unstalled
doc: Number of HTTP2 writes initiated due to 'transport_flow_control_unstalled'
- counter: http2_initiate_write_due_to_ping_response
doc: Number of HTTP2 writes initiated due to 'ping_response'
- counter: http2_initiate_write_due_to_force_rst_stream
doc: Number of HTTP2 writes initiated due to 'force_rst_stream'
- counter: http2_spurious_writes_begun
doc: Number of HTTP2 writes initiated with nothing to write
- counter: hpack_recv_indexed
doc: Number of HPACK indexed fields received
- counter: hpack_recv_lithdr_incidx
doc: Number of HPACK literal headers received with incremental indexing
- counter: hpack_recv_lithdr_incidx_v
doc: Number of HPACK literal headers received with incremental indexing and literal keys
- counter: hpack_recv_lithdr_notidx
doc: Number of HPACK literal headers received with no indexing
- counter: hpack_recv_lithdr_notidx_v
doc: Number of HPACK literal headers received with no indexing and literal keys
- counter: hpack_recv_lithdr_nvridx
doc: Number of HPACK literal headers received with never-indexing
- counter: hpack_recv_lithdr_nvridx_v
doc: Number of HPACK literal headers received with never-indexing and literal keys
- counter: hpack_recv_uncompressed
doc: Number of uncompressed strings received in metadata
- counter: hpack_recv_huffman
  doc: Number of Huffman-encoded strings received in metadata
- counter: hpack_recv_binary
doc: Number of binary strings received in metadata
- counter: hpack_recv_binary_base64
doc: Number of binary strings received encoded in base64 in metadata
- counter: hpack_send_indexed
doc: Number of HPACK indexed fields sent
- counter: hpack_send_lithdr_incidx
doc: Number of HPACK literal headers sent with incremental indexing
- counter: hpack_send_lithdr_incidx_v
doc: Number of HPACK literal headers sent with incremental indexing and literal keys
- counter: hpack_send_lithdr_notidx
doc: Number of HPACK literal headers sent with no indexing
- counter: hpack_send_lithdr_notidx_v
doc: Number of HPACK literal headers sent with no indexing and literal keys
- counter: hpack_send_lithdr_nvridx
doc: Number of HPACK literal headers sent with never-indexing
- counter: hpack_send_lithdr_nvridx_v
doc: Number of HPACK literal headers sent with never-indexing and literal keys
- counter: hpack_send_uncompressed
doc: Number of uncompressed strings sent in metadata
- counter: hpack_send_huffman
  doc: Number of Huffman-encoded strings sent in metadata
- counter: hpack_send_binary
  doc: Number of binary strings sent in metadata
- counter: hpack_send_binary_base64
  doc: Number of binary strings sent encoded in base64 in metadata
# combiner locks
- counter: combiner_locks_initiated
doc: Number of combiner lock entries by process
(first items queued to a combiner)
- counter: combiner_locks_scheduled_items
doc: Number of items scheduled against combiner locks
- counter: combiner_locks_scheduled_final_items
doc: Number of final items scheduled against combiner locks
- counter: combiner_locks_offloaded
doc: Number of combiner locks offloaded to different threads
# call combiner locks
- counter: call_combiner_locks_initiated
doc: Number of call combiner lock entries by process
(first items queued to a call combiner)
- counter: call_combiner_locks_scheduled_items
doc: Number of items scheduled against call combiner locks
- counter: call_combiner_set_notify_on_cancel
doc: Number of times a cancellation callback was set on a call combiner
- counter: call_combiner_cancelled
doc: Number of times a call combiner was cancelled
# executor
- counter: executor_scheduled_short_items
doc: Number of finite runtime closures scheduled against the executor
(gRPC thread pool)
- counter: executor_scheduled_long_items
doc: Number of potentially infinite runtime closures scheduled against the
executor (gRPC thread pool)
- counter: executor_scheduled_to_self
doc: Number of closures scheduled by the executor to the executor
- counter: executor_wakeup_initiated
doc: Number of thread wakeups initiated within the executor
- counter: executor_queue_drained
doc: Number of times an executor queue was drained
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
# server
- counter: server_requested_calls
doc: How many calls were requested (not necessarily received) by the server
- histogram: server_cqs_checked
buckets: 8
max: 64
doc: How many completion queues were checked looking for a CQ that had
requested the incoming call
- counter: server_slowpath_requests_queued
doc: How many times was the server slow path taken (indicates too few
outstanding requests)
# cq
- counter: cq_ev_queue_trylock_failures
doc: Number of lock (trylock) acquisition failures on completion queue event
       queue. A high value here indicates high contention on completion queues
- counter: cq_ev_queue_trylock_successes
doc: Number of lock (trylock) acquisition successes on completion queue event
queue.
- counter: cq_ev_queue_transient_pop_failures
doc: Number of times NULL was popped out of completion queue's event queue
even though the event queue was not empty
- counter: http2_transport_stalls
doc: Number of times sending was completely stalled by the transport flow control window
- counter: http2_stream_stalls
doc: Number of times sending was completely stalled by the stream flow control window
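Each histogram entry in this file declares just a max and a bucket count; the generated lookup maps a sample to a bucket and falls back to scanning the bucket boundaries when the fast path misses, which is what histogram_slow_lookups above counts. A hedged sketch of such a slow-path lookup (not the generated code itself):

#include <algorithm>

// boundaries: ascending bucket lower bounds, boundaries[0] == 0.
// Returns the index of the bucket whose range contains value.
int SlowBucketLookup(const int* boundaries, int num_buckets, int value) {
  const int* it =
      std::upper_bound(boundaries, boundaries + num_buckets, value);
  return static_cast<int>(it - boundaries) - 1;  // last boundary <= value
}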

@ -1,97 +1,13 @@
client_calls_created_per_iteration:FLOAT,
server_calls_created_per_iteration:FLOAT,
cqs_created_per_iteration:FLOAT,
client_channels_created_per_iteration:FLOAT,
client_subchannels_created_per_iteration:FLOAT,
server_channels_created_per_iteration:FLOAT,
syscall_poll_per_iteration:FLOAT,
syscall_wait_per_iteration:FLOAT,
pollset_kick_per_iteration:FLOAT,
pollset_kicked_without_poller_per_iteration:FLOAT,
pollset_kicked_again_per_iteration:FLOAT,
pollset_kick_wakeup_fd_per_iteration:FLOAT,
pollset_kick_wakeup_cv_per_iteration:FLOAT,
pollset_kick_own_thread_per_iteration:FLOAT,
histogram_slow_lookups_per_iteration:FLOAT,
syscall_write_per_iteration:FLOAT,
syscall_read_per_iteration:FLOAT,
tcp_backup_pollers_created_per_iteration:FLOAT,
tcp_backup_poller_polls_per_iteration:FLOAT,
http2_op_batches_per_iteration:FLOAT,
http2_op_cancel_per_iteration:FLOAT,
http2_op_send_initial_metadata_per_iteration:FLOAT,
http2_op_send_message_per_iteration:FLOAT,
http2_op_send_trailing_metadata_per_iteration:FLOAT,
http2_op_recv_initial_metadata_per_iteration:FLOAT,
http2_op_recv_message_per_iteration:FLOAT,
http2_op_recv_trailing_metadata_per_iteration:FLOAT,
http2_settings_writes_per_iteration:FLOAT,
http2_pings_sent_per_iteration:FLOAT,
http2_writes_begun_per_iteration:FLOAT,
http2_writes_offloaded_per_iteration:FLOAT,
http2_writes_continued_per_iteration:FLOAT,
http2_partial_writes_per_iteration:FLOAT,
http2_initiate_write_due_to_initial_write_per_iteration:FLOAT,
http2_initiate_write_due_to_start_new_stream_per_iteration:FLOAT,
http2_initiate_write_due_to_send_message_per_iteration:FLOAT,
http2_initiate_write_due_to_send_initial_metadata_per_iteration:FLOAT,
http2_initiate_write_due_to_send_trailing_metadata_per_iteration:FLOAT,
http2_initiate_write_due_to_retry_send_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_continue_pings_per_iteration:FLOAT,
http2_initiate_write_due_to_goaway_sent_per_iteration:FLOAT,
http2_initiate_write_due_to_rst_stream_per_iteration:FLOAT,
http2_initiate_write_due_to_close_from_api_per_iteration:FLOAT,
http2_initiate_write_due_to_stream_flow_control_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_per_iteration:FLOAT,
http2_initiate_write_due_to_send_settings_per_iteration:FLOAT,
http2_initiate_write_due_to_settings_ack_per_iteration:FLOAT,
http2_initiate_write_due_to_bdp_estimator_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_flow_control_unstalled_by_setting_per_iteration:FLOAT,
http2_initiate_write_due_to_flow_control_unstalled_by_update_per_iteration:FLOAT,
http2_initiate_write_due_to_application_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_keepalive_ping_per_iteration:FLOAT,
http2_initiate_write_due_to_transport_flow_control_unstalled_per_iteration:FLOAT,
http2_initiate_write_due_to_ping_response_per_iteration:FLOAT,
http2_initiate_write_due_to_force_rst_stream_per_iteration:FLOAT,
http2_spurious_writes_begun_per_iteration:FLOAT,
hpack_recv_indexed_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_per_iteration:FLOAT,
hpack_recv_lithdr_incidx_v_per_iteration:FLOAT,
hpack_recv_lithdr_notidx_per_iteration:FLOAT,
hpack_recv_lithdr_notidx_v_per_iteration:FLOAT,
hpack_recv_lithdr_nvridx_per_iteration:FLOAT,
hpack_recv_lithdr_nvridx_v_per_iteration:FLOAT,
hpack_recv_uncompressed_per_iteration:FLOAT,
hpack_recv_huffman_per_iteration:FLOAT,
hpack_recv_binary_per_iteration:FLOAT,
hpack_recv_binary_base64_per_iteration:FLOAT,
hpack_send_indexed_per_iteration:FLOAT,
hpack_send_lithdr_incidx_per_iteration:FLOAT,
hpack_send_lithdr_incidx_v_per_iteration:FLOAT,
hpack_send_lithdr_notidx_per_iteration:FLOAT,
hpack_send_lithdr_notidx_v_per_iteration:FLOAT,
hpack_send_lithdr_nvridx_per_iteration:FLOAT,
hpack_send_lithdr_nvridx_v_per_iteration:FLOAT,
hpack_send_uncompressed_per_iteration:FLOAT,
hpack_send_huffman_per_iteration:FLOAT,
hpack_send_binary_per_iteration:FLOAT,
hpack_send_binary_base64_per_iteration:FLOAT,
combiner_locks_initiated_per_iteration:FLOAT,
combiner_locks_scheduled_items_per_iteration:FLOAT,
combiner_locks_scheduled_final_items_per_iteration:FLOAT,
combiner_locks_offloaded_per_iteration:FLOAT,
call_combiner_locks_initiated_per_iteration:FLOAT,
call_combiner_locks_scheduled_items_per_iteration:FLOAT,
call_combiner_set_notify_on_cancel_per_iteration:FLOAT,
call_combiner_cancelled_per_iteration:FLOAT,
executor_scheduled_short_items_per_iteration:FLOAT,
executor_scheduled_long_items_per_iteration:FLOAT,
executor_scheduled_to_self_per_iteration:FLOAT,
executor_wakeup_initiated_per_iteration:FLOAT,
executor_queue_drained_per_iteration:FLOAT,
executor_push_retries_per_iteration:FLOAT,
server_requested_calls_per_iteration:FLOAT,
server_slowpath_requests_queued_per_iteration:FLOAT,
cq_ev_queue_trylock_failures_per_iteration:FLOAT,
cq_ev_queue_trylock_successes_per_iteration:FLOAT,
cq_ev_queue_transient_pop_failures_per_iteration:FLOAT,
http2_transport_stalls_per_iteration:FLOAT,
http2_stream_stalls_per_iteration:FLOAT
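Each *_per_iteration column in this schema is presumably a counter delta normalized by the number of benchmark iterations, along the lines of:

// hedged: how a raw counter becomes a per-iteration FLOAT column
double per_iteration =
    static_cast<double>(end_count - begin_count) /
    static_cast<double>(iterations);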

@ -127,9 +127,7 @@ void CallCombiner::Start(grpc_closure* closure, grpc_error_handle error,
gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1);
}
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
if (prev_size == 0) {
GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED();
GPR_TIMER_MARK("call_combiner_initiate", 0);
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
@ -192,7 +190,6 @@ void CallCombiner::Stop(DEBUG_ARGS const char* reason) {
}
void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL();
while (true) {
// Decode original state.
gpr_atm original_state = gpr_atm_acq_load(&cancel_state_);
@ -235,7 +232,6 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
}
void CallCombiner::Cancel(grpc_error_handle error) {
GRPC_STATS_INC_CALL_COMBINER_CANCELLED();
intptr_t status_ptr = internal::StatusAllocHeapPtr(error);
gpr_atm new_state = kErrorBit | status_ptr;
while (true) {

@ -591,20 +591,16 @@ static grpc_error_handle pollset_kick_all(grpc_pollset* pollset) {
if (pollset->root_worker != nullptr) {
grpc_pollset_worker* worker = pollset->root_worker;
do {
GRPC_STATS_INC_POLLSET_KICK();
switch (worker->state) {
case KICKED:
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
break;
case UNKICKED:
SET_KICK_STATE(worker, KICKED);
if (worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
gpr_cv_signal(&worker->cv);
}
break;
case DESIGNATED_POLLER:
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
SET_KICK_STATE(worker, KICKED);
append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
"pollset_kick_all");
@ -721,7 +717,6 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
GRPC_SCHEDULING_START_BLOCKING_REGION;
}
do {
GRPC_STATS_INC_SYSCALL_POLL();
r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
timeout);
} while (r < 0 && errno == EINTR);
@ -731,8 +726,6 @@ static grpc_error_handle do_epoll_wait(grpc_pollset* ps,
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
}
@ -893,7 +886,6 @@ static bool check_neighborhood_for_available_poller(
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
if (inspect_worker->initialized_cv) {
GPR_TIMER_MARK("signal worker", 0);
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
gpr_cv_signal(&inspect_worker->cv);
}
} else {
@ -952,7 +944,6 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
gpr_cv_signal(&worker->next->cv);
if (grpc_core::ExecCtx::Get()->HasWork()) {
gpr_mu_unlock(&pollset->mu);
@ -1065,7 +1056,6 @@ static grpc_error_handle pollset_work(grpc_pollset* ps,
static grpc_error_handle pollset_kick(grpc_pollset* pollset,
grpc_pollset_worker* specific_worker) {
GPR_TIMER_SCOPE("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK();
grpc_error_handle ret_err = GRPC_ERROR_NONE;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
std::vector<std::string> log;
@ -1091,7 +1081,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
if (g_current_thread_pollset != pollset) {
grpc_pollset_worker* root_worker = pollset->root_worker;
if (root_worker == nullptr) {
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true;
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked_without_poller");
@ -1100,14 +1089,12 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
grpc_pollset_worker* next_worker = root_worker->next;
if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
}
SET_KICK_STATE(root_worker, KICKED);
goto done;
} else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
}
@ -1118,7 +1105,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
root_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", root_worker);
}
@ -1126,7 +1112,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked %p", next_worker);
}
@ -1144,12 +1129,10 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
SET_KICK_STATE(root_worker, KICKED);
if (root_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
gpr_cv_signal(&root_worker->cv);
}
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
root_worker);
@ -1159,13 +1142,11 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
goto done;
}
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
GPR_ASSERT(next_worker->state == KICKED);
SET_KICK_STATE(next_worker, KICKED);
goto done;
}
} else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kicked while waking up");
}
@ -1181,7 +1162,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
}
goto done;
} else if (g_current_thread_worker == specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
}
@ -1190,7 +1170,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
} else if (specific_worker ==
reinterpret_cast<grpc_pollset_worker*>(
gpr_atm_no_barrier_load(&g_active_poller))) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick active poller");
}
@ -1198,7 +1177,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
goto done;
} else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick waiting worker");
}
@ -1206,7 +1184,6 @@ static grpc_error_handle pollset_kick(grpc_pollset* pollset,
gpr_cv_signal(&specific_worker->cv);
goto done;
} else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (GRPC_TRACE_FLAG_ENABLED(grpc_polling_trace)) {
gpr_log(GPR_INFO, " .. kick non-waiting worker");
}
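The kick paths whose counters are deleted above all funnel into one small decision: pick the cheapest wakeup for a worker's current state. A hedged distillation of that dispatch (types and names illustrative, not the epoll1 internals):

#include <condition_variable>

enum class WorkerState { kUnkicked, kKicked, kDesignatedPoller };

struct Worker {
  WorkerState state = WorkerState::kUnkicked;
  bool initialized_cv = false;  // has a condition variable to signal
  std::condition_variable cv;
};

void WakeupFdWakeup();  // assumed: writes the shared wakeup eventfd

void KickWorker(Worker* w) {
  switch (w->state) {
    case WorkerState::kKicked:
      break;  // already kicked: nothing to do
    case WorkerState::kUnkicked:
      w->state = WorkerState::kKicked;
      if (w->initialized_cv) w->cv.notify_one();  // wake a cv-waiting worker
      break;
    case WorkerState::kDesignatedPoller:
      w->state = WorkerState::kKicked;
      WakeupFdWakeup();  // interrupt the poller blocked in epoll_wait
      break;
  }
}

The removed counters (pollset_kicked_again, pollset_kick_wakeup_cv, pollset_kick_wakeup_fd) each tagged one of these arms.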

@ -771,7 +771,6 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
uint32_t flags) {
GPR_TIMER_SCOPE("pollset_kick_ext", 0);
grpc_error_handle error = GRPC_ERROR_NONE;
GRPC_STATS_INC_POLLSET_KICK();
/* pollset->mu already held */
if (specific_worker != nullptr) {
@ -1024,7 +1023,6 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
/* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
even going into the blocking annotation if possible */
GRPC_SCHEDULING_START_BLOCKING_REGION;
GRPC_STATS_INC_SYSCALL_POLL();
r = grpc_poll_function(pfds, pfd_count, timeout);
GRPC_SCHEDULING_END_BLOCKING_REGION;

@ -63,7 +63,6 @@ grpc_iocp_work_status grpc_iocp_work(grpc_core::Timestamp deadline) {
LPOVERLAPPED overlapped;
grpc_winsocket* socket;
grpc_winsocket_callback_info* info;
GRPC_STATS_INC_SYSCALL_POLL();
success =
GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
deadline_to_millis_timeout(deadline));

@ -586,7 +586,6 @@ static void run_poller(void* bp, grpc_error_handle /*error_ignored*/) {
gpr_mu_lock(p->pollset_mu);
grpc_core::Timestamp deadline =
grpc_core::ExecCtx::Get()->Now() + grpc_core::Duration::Seconds(10);
GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
GRPC_LOG_IF_ERROR(
"backup_poller:pollset_work",
grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
@ -647,7 +646,6 @@ static void cover_self(grpc_tcp* tcp) {
g_backup_poller = p;
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
g_backup_poller_mu->Unlock();
GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
}
@ -1028,12 +1026,11 @@ static void maybe_make_read_slices(grpc_tcp* tcp)
std::max(tcp->min_read_chunk_size, tcp->min_progress_size);
int max_read_chunk_size =
std::max(tcp->max_read_chunk_size, tcp->min_progress_size);
grpc_slice_buffer_add_indexed(
tcp->incoming_buffer,
tcp->memory_owner.MakeSlice(grpc_core::MemoryRequest(
min_read_chunk_size,
grpc_core::Clamp(extra_wanted, min_read_chunk_size,
max_read_chunk_size))));
grpc_slice slice = tcp->memory_owner.MakeSlice(grpc_core::MemoryRequest(
min_read_chunk_size, grpc_core::Clamp(extra_wanted, min_read_chunk_size,
max_read_chunk_size)));
GRPC_STATS_INC_TCP_READ_ALLOCATION(GRPC_SLICE_LENGTH(slice));
grpc_slice_buffer_add_indexed(tcp->incoming_buffer, slice);
maybe_post_reclaimer(tcp);
}
}

@ -40,7 +40,6 @@
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gprpp/atomic_utils.h"
@ -489,17 +488,9 @@ grpc_cq_completion* CqEventQueue::Pop() {
grpc_cq_completion* c = nullptr;
if (gpr_spinlock_trylock(&queue_lock_)) {
GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES();
bool is_empty = false;
c = reinterpret_cast<grpc_cq_completion*>(queue_.PopAndCheckEnd(&is_empty));
gpr_spinlock_unlock(&queue_lock_);
if (c == nullptr && !is_empty) {
GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES();
}
} else {
GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES();
}
if (c) {
@ -526,7 +517,6 @@ grpc_completion_queue* grpc_completion_queue_create_internal(
&g_poller_vtable_by_poller_type[polling_type];
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_CQS_CREATED();
cq = static_cast<grpc_completion_queue*>(
gpr_zalloc(sizeof(grpc_completion_queue) + vtable->data_size +

@ -45,7 +45,6 @@
#include "src/core/lib/channel/channel_trace.h"
#include "src/core/lib/channel/channelz.h"
#include "src/core/lib/config/core_configuration.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/debug_location.h"
#include "src/core/lib/gprpp/mpscq.h"
@ -275,14 +274,12 @@ class Server::RealRequestMatcher : public RequestMatcherInterface {
RequestedCall* rc =
reinterpret_cast<RequestedCall*>(requests_per_cq_[cq_idx].TryPop());
if (rc != nullptr) {
GRPC_STATS_INC_SERVER_CQS_CHECKED(i);
calld->SetState(CallData::CallState::ACTIVATED);
calld->Publish(cq_idx, rc);
return;
}
}
// No cq to take the request found; queue it on the slow list.
GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED();
// We need to ensure that all the queues are empty. We do this under
// the server mu_call_ lock to ensure that if something is added to
// an empty request queue, it will block until the call is actually
@ -306,7 +303,6 @@ class Server::RealRequestMatcher : public RequestMatcherInterface {
return;
}
}
GRPC_STATS_INC_SERVER_CQS_CHECKED(loop_count + requests_per_cq_.size());
calld->SetState(CallData::CallState::ACTIVATED);
calld->Publish(cq_idx, rc);
}
@ -1525,7 +1521,6 @@ grpc_call_error grpc_server_request_call(
grpc_completion_queue* cq_for_notification, void* tag) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
GRPC_API_TRACE(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
@ -1546,7 +1541,6 @@ grpc_call_error grpc_server_request_registered_call(
grpc_completion_queue* cq_for_notification, void* tag_new) {
grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
auto* rm =
static_cast<grpc_core::Server::RegisteredMethod*>(registered_method);
GRPC_API_TRACE(

@ -63,9 +63,10 @@ TEST(StatsTest, IncSpecificCounter) {
std::unique_ptr<Snapshot> snapshot(new Snapshot);
grpc_core::ExecCtx exec_ctx;
GRPC_STATS_INC_SYSCALL_POLL();
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
EXPECT_EQ(snapshot->delta().counters[GRPC_STATS_COUNTER_SYSCALL_POLL], 1);
EXPECT_EQ(snapshot->delta().counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED],
1);
}
static int FindExpectedBucket(int i, int j) {
@ -145,19 +146,10 @@ INSTANTIATE_TEST_SUITE_P(HistogramTestCases, HistogramTest,
} // namespace grpc
int main(int argc, char** argv) {
/* Only run this test if GRPC_COLLECT_STATS is defined or if it is a debug
* build.
*/
#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)
grpc::testing::TestEnvironment env(&argc, argv);
::testing::InitGoogleTest(&argc, argv);
grpc_init();
int ret = RUN_ALL_TESTS();
grpc_shutdown();
return ret;
#else
// Avoid unused parameter warning for conditional parameters.
(void)argc;
(void)argv;
#endif
}
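The test's Snapshot helper wraps a collect/diff pattern; a hedged sketch of the underlying calls (grpc_stats_diff is assumed here to have this shape in stats.h):

grpc_stats_data before, after, delta;
grpc_stats_collect(&before);
GRPC_STATS_INC_CLIENT_CALLS_CREATED();
grpc_stats_collect(&after);
grpc_stats_diff(&after, &before, &delta);
GPR_ASSERT(delta.counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] == 1);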

@ -121,9 +121,7 @@ static void simple_request_body(grpc_end2end_test_config config,
grpc_stats_data* after =
static_cast<grpc_stats_data*>(gpr_malloc(sizeof(grpc_stats_data)));
#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)
grpc_stats_collect(before);
#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */
gpr_timespec deadline = five_seconds_from_now();
c = grpc_channel_create_call(f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
@ -242,7 +240,6 @@ static void simple_request_body(grpc_end2end_test_config config,
if (config.feature_mask & FEATURE_MASK_SUPPORTS_REQUEST_PROXYING) {
expected_calls *= 2;
}
#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)
grpc_stats_collect(after);
@ -254,7 +251,7 @@ static void simple_request_body(grpc_end2end_test_config config,
GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] -
before->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] ==
expected_calls);
#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */
gpr_free(before);
gpr_free(after);
}

@ -59,11 +59,6 @@ void TrackCounters::AddLabel(const std::string& label) {
}
void TrackCounters::AddToLabel(std::ostream& out, benchmark::State& state) {
// Use the parameters to avoid unused-parameter warnings depending on the
// #define's present
(void)out;
(void)state;
#ifdef GRPC_COLLECT_STATS
grpc_stats_data stats_end;
grpc_stats_collect(&stats_end);
grpc_stats_data stats;
@ -75,11 +70,12 @@ void TrackCounters::AddToLabel(std::ostream& out, benchmark::State& state) {
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
out << " " << grpc_stats_histogram_name[i] << "-median:"
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0)
<< grpc_stats_histo_percentile(
&stats, static_cast<grpc_stats_histograms>(i), 50.0)
<< " " << grpc_stats_histogram_name[i] << "-99p:"
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0);
<< grpc_stats_histo_percentile(
&stats, static_cast<grpc_stats_histograms>(i), 99.0);
}
#endif
#ifdef GPR_LOW_LEVEL_COUNTERS
out << " locks/iter:"
<< ((double)(gpr_atm_no_barrier_load(&gpr_mu_locks) -

@ -278,7 +278,6 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H:
print(" GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot, file=H)
print("} grpc_stats_histogram_constants;", file=H)
print("#if defined(GRPC_COLLECT_STATS) || !defined(NDEBUG)", file=H)
for ctr in inst_map['Counter']:
print(("#define GRPC_STATS_INC_%s() " +
"GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") %
@ -291,15 +290,6 @@ with open('src/core/lib/debug/stats_data.h', 'w') as H:
file=H)
print("void grpc_stats_inc_%s(int x);" % histogram.name.lower(), file=H)
print("#else", file=H)
for ctr in inst_map['Counter']:
print(("#define GRPC_STATS_INC_%s() ") % (ctr.name.upper()), file=H)
for histogram in inst_map['Histogram']:
print("#define GRPC_STATS_INC_%s(value)" % (histogram.name.upper()),
file=H)
print("#endif /* defined(GRPC_COLLECT_STATS) || !defined(NDEBUG) */",
file=H)
for i, tbl in enumerate(static_tables):
print("extern const %s grpc_stats_table_%d[%d];" %
(tbl[0], i, len(tbl[1])),

@ -30,8 +30,6 @@ def massage_qps_stats(scenario_result):
stats[
"core_server_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_calls_created")
stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
core_stats, "cqs_created")
stats[
"core_client_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_channels_created")
@ -41,27 +39,6 @@ def massage_qps_stats(scenario_result):
stats[
"core_server_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_channels_created")
stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_poll")
stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_wait")
stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick")
stats[
"core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_without_poller")
stats[
"core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_again")
stats[
"core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_fd")
stats[
"core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_cv")
stats[
"core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_own_thread")
stats[
"core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
core_stats, "histogram_slow_lookups")
@ -69,34 +46,6 @@ def massage_qps_stats(scenario_result):
core_stats, "syscall_write")
stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_read")
stats[
"core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_pollers_created")
stats[
"core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_poller_polls")
stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_batches")
stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_cancel")
stats[
"core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_initial_metadata")
stats[
"core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_message")
stats[
"core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_trailing_metadata")
stats[
"core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_initial_metadata")
stats[
"core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_message")
stats[
"core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_trailing_metadata")
stats[
"core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_settings_writes")
@ -106,215 +55,11 @@ def massage_qps_stats(scenario_result):
"core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_begun")
stats[
"core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_offloaded")
stats[
"core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_continued")
stats[
"core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_partial_writes")
stats[
"core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_initial_write")
stats[
"core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_start_new_stream")
stats[
"core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_message")
stats[
"core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_initial_metadata")
stats[
"core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_trailing_metadata")
stats[
"core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_retry_send_ping")
stats[
"core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_continue_pings")
stats[
"core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_goaway_sent")
stats[
"core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_rst_stream")
stats[
"core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_close_from_api")
stats[
"core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_stream_flow_control")
stats[
"core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control")
stats[
"core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_settings")
stats[
"core_http2_initiate_write_due_to_settings_ack"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_settings_ack")
stats[
"core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_bdp_estimator_ping")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_setting"
)
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_update"
)
stats[
"core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_application_ping")
stats[
"core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_keepalive_ping")
stats[
"core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control_unstalled"
)
stats[
"core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_ping_response")
stats[
"core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_force_rst_stream")
stats[
"core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_spurious_writes_begun")
stats[
"core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_indexed")
stats[
"core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx")
stats[
"core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx_v")
stats[
"core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx")
stats[
"core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx_v")
stats[
"core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx")
stats[
"core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx_v")
stats[
"core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_uncompressed")
stats[
"core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_huffman")
stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary")
stats[
"core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary_base64")
stats[
"core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_indexed")
stats[
"core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx")
stats[
"core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx_v")
stats[
"core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx")
stats[
"core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx_v")
stats[
"core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx")
stats[
"core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx_v")
stats[
"core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_uncompressed")
stats[
"core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_huffman")
stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary")
stats[
"core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary_base64")
stats[
"core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_initiated")
stats[
"core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_items")
stats[
"core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_final_items")
stats[
"core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_offloaded")
stats[
"core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_initiated")
stats[
"core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_scheduled_items")
stats[
"core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_set_notify_on_cancel")
stats[
"core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_cancelled")
stats[
"core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_short_items")
stats[
"core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_long_items")
"core_http2_transport_stalls"] = massage_qps_stats_helpers.counter(
core_stats, "http2_transport_stalls")
stats[
"core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_to_self")
stats[
"core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "executor_wakeup_initiated")
stats[
"core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
core_stats, "executor_queue_drained")
stats[
"core_executor_push_retries"] = massage_qps_stats_helpers.counter(
core_stats, "executor_push_retries")
stats[
"core_server_requested_calls"] = massage_qps_stats_helpers.counter(
core_stats, "server_requested_calls")
stats[
"core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
core_stats, "server_slowpath_requests_queued")
stats[
"core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_failures")
stats[
"core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_successes")
stats[
"core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_transient_pop_failures")
"core_http2_stream_stalls"] = massage_qps_stats_helpers.counter(
core_stats, "http2_stream_stalls")
h = massage_qps_stats_helpers.histogram(core_stats,
"call_initial_size")
stats["core_call_initial_size"] = ",".join(
@@ -330,21 +75,6 @@ def massage_qps_stats(scenario_result):
stats[
"core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
stats["core_poll_events_returned"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
@@ -374,6 +104,21 @@ def massage_qps_stats(scenario_result):
stats[
"core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_allocation")
stats["core_tcp_read_allocation"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_read_allocation_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_allocation_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_allocation_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_allocation_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_size_bkts"] = ",".join(
@@ -431,79 +176,3 @@ def massage_qps_stats(scenario_result):
stats[
"core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_initial_metadata_per_write")
stats["core_http2_send_initial_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_message_per_write")
stats["core_http2_send_message_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_trailing_metadata_per_write")
stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats[
"core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_flowctl_per_write")
stats["core_http2_send_flowctl_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_server_cqs_checked_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
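# --- Illustrative sketch, not part of this diff: the histogram blocks above
# assume a helper that returns per-bucket counts alongside bucket start
# boundaries, and a percentile() that finds the bucket containing the target
# rank and interpolates linearly inside it. The field names ('histogram',
# 'buckets', 'start', 'count') and the interpolation scheme are assumptions.
import collections

Histogram = collections.namedtuple('Histogram', ['buckets', 'boundaries'])

def histogram(core_stats, name):
    # Pull the named histogram out of the snapshot as parallel lists.
    for stat in core_stats.get('metrics', []):
        if stat.get('name') == name:
            bs = stat['histogram']['buckets']
            return Histogram(buckets=[int(b.get('count', 0)) for b in bs],
                             boundaries=[int(b.get('start', 0)) for b in bs])

def percentile(buckets, pctl, boundaries):
    # Walk buckets until the cumulative count crosses pctl% of the total,
    # then interpolate linearly between that bucket's boundaries.
    target = sum(buckets) * pctl / 100.0
    seen = 0.0
    for i, count in enumerate(buckets):
        if count > 0 and seen + count >= target:
            lo = boundaries[i]
            hi = boundaries[i + 1] if i + 1 < len(boundaries) else lo
            return lo + (hi - lo) * (target - seen) / count
        seen += count
    return boundaries[-1] if boundaries else 0.0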