Merge pull request #12398 from ctiller/server_stats

Add stats for server request serving path
Craig Tiller (7 years ago, committed via GitHub)
commit af57a57a5a
Files changed:
1. src/core/lib/debug/stats_data.c (52 changed lines)
2. src/core/lib/debug/stats_data.h (23 changed lines)
3. src/core/lib/debug/stats_data.yaml (11 changed lines)
4. src/core/lib/surface/server.c (5 changed lines)

src/core/lib/debug/stats_data.c:

@@ -56,6 +56,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
     "executor_wakeup_initiated",
     "executor_queue_drained",
     "executor_push_retries",
+    "server_requested_calls",
+    "server_slowpath_requests_queued",
 };
 const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
     "Number of client side calls created by this process",
@@ -98,6 +100,9 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
     "Number of times an executor queue was drained",
     "Number of times we raced and were forced to retry pushing a closure to "
     "the executor",
+    "How many calls were requested (not necessarily received) by the server",
+    "How many times was the server slow path taken (indicates too few "
+    "outstanding requests)",
 };
 const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
     "tcp_write_size",
@@ -110,6 +115,7 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
     "http2_send_message_per_write",
     "http2_send_trailing_metadata_per_write",
     "http2_send_flowctl_per_write",
+    "server_cqs_checked",
 };
 const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
     "Number of bytes offered to each syscall_write",
@@ -122,6 +128,8 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
     "Number of streams whose payload was written per TCP write",
     "Number of streams terminated per TCP write",
     "Number of flow control updates written per TCP write",
+    "How many completion queues were checked looking for a CQ that had "
+    "requested the incoming call",
 };
 const int grpc_stats_table_0[65] = {
     0, 1, 2, 3, 4, 6, 8, 11,
@@ -152,6 +160,8 @@ const uint8_t grpc_stats_table_3[102] = {
     23, 24, 24, 24, 25, 26, 27, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32,
     32, 33, 33, 34, 35, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41,
     42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
+const int grpc_stats_table_4[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
+const uint8_t grpc_stats_table_5[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
 void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) {
   value = GPR_CLAMP(value, 0, 16777216);
   if (value < 5) {
@@ -418,16 +428,41 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
                            grpc_stats_histo_find_bucket_slow(
                                (exec_ctx), value, grpc_stats_table_2, 64));
 }
-const int grpc_stats_histo_buckets[10] = {64, 64, 64, 64, 64,
-                                          64, 64, 64, 64, 64};
-const int grpc_stats_histo_start[10] = {0, 64, 128, 192, 256,
-                                        320, 384, 448, 512, 576};
-const int *const grpc_stats_histo_bucket_boundaries[10] = {
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) {
+  value = GPR_CLAMP(value, 0, 64);
+  if (value < 3) {
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4625196817309499392ull) {
+    int bucket =
+        grpc_stats_table_5[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
+    _bkt.dbl = grpc_stats_table_4[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
+                             GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+                           grpc_stats_histo_find_bucket_slow(
+                               (exec_ctx), value, grpc_stats_table_4, 8));
+}
+const int grpc_stats_histo_buckets[11] = {64, 64, 64, 64, 64, 64,
+                                          64, 64, 64, 64, 8};
+const int grpc_stats_histo_start[11] = {0, 64, 128, 192, 256, 320,
+                                        384, 448, 512, 576, 640};
+const int *const grpc_stats_histo_bucket_boundaries[11] = {
     grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
     grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
     grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
-    grpc_stats_table_2};
-void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx, int x) = {
+    grpc_stats_table_2, grpc_stats_table_4};
+void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx, int x) = {
     grpc_stats_inc_tcp_write_size,
     grpc_stats_inc_tcp_write_iov_size,
     grpc_stats_inc_tcp_read_size,
@@ -437,4 +472,5 @@ void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx, int x) = {
     grpc_stats_inc_http2_send_initial_metadata_per_write,
     grpc_stats_inc_http2_send_message_per_write,
     grpc_stats_inc_http2_send_trailing_metadata_per_write,
-    grpc_stats_inc_http2_send_flowctl_per_write};
+    grpc_stats_inc_http2_send_flowctl_per_write,
+    grpc_stats_inc_server_cqs_checked};
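
A note on the generated fast path above: for positive doubles, the IEEE-754 bit pattern increases monotonically with the value, so the code reinterprets the clamped value as a double and uses the high bits of _val.uint minus the bit pattern of 3.0 (4613937818241073152ull) as a log-scale index into the tiny grpc_stats_table_5, falling back to the slow bucket search only at 16.0 and above (4625196817309499392ull is the bit pattern of 16.0). The sketch below is a self-contained model of that trick, useful for checking the arithmetic; only the two tables are copied from the diff, and every other name here is a stand-in, not gRPC code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copied from grpc_stats_table_4 / grpc_stats_table_5 above. */
static const int boundaries[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
static const uint8_t shift_table[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};

static uint64_t bits_of(double d) {
  uint64_t u;
  memcpy(&u, &d, sizeof(u)); /* well-defined double -> uint64 punning */
  return u;
}

/* Slow-path stand-in: last boundary <= value (buckets are 0..7). */
static int bucket_slow(int value) {
  int b = 7;
  while (boundaries[b] > value) --b;
  return b;
}

/* Fast path, mirroring the generated code: the top bits of
   bits(value) - bits(3.0) give a coarse log-scale index; one compare
   fixes the off-by-one when value sits just below the candidate
   bucket's lower boundary. */
static int bucket_fast(int value) {
  if (value < 3) return value; /* buckets 0..2 hold exactly 0, 1, 2 */
  uint64_t v = bits_of((double)value);
  if (v < bits_of(16.0)) {
    int b = shift_table[(v - bits_of(3.0)) >> 51] + 3;
    if ((double)value < (double)boundaries[b]) --b;
    return b;
  }
  return bucket_slow(value); /* rare: only values in [16, 64] */
}

int main(void) {
  for (int v = 0; v <= 64; ++v) {
    if (bucket_fast(v) != bucket_slow(v)) {
      printf("mismatch at %d\n", v);
      return 1;
    }
  }
  printf("fast and slow bucketers agree on 0..64\n");
  return 0;
}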

src/core/lib/debug/stats_data.h:

@@ -58,6 +58,8 @@ typedef enum {
   GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
   GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
   GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
+  GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS,
+  GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED,
   GRPC_STATS_COUNTER_COUNT
 } grpc_stats_counters;
 extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
@@ -73,6 +75,7 @@ typedef enum {
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
   GRPC_STATS_HISTOGRAM_COUNT
 } grpc_stats_histograms;
 extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@@ -98,7 +101,9 @@ typedef enum {
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 576,
   GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
-  GRPC_STATS_HISTOGRAM_BUCKETS = 640
+  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 640,
+  GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
+  GRPC_STATS_HISTOGRAM_BUCKETS = 648
 } grpc_stats_histogram_constants;
 #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@@ -179,6 +184,11 @@ typedef enum {
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
 #define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
   GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
+#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
+  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
+                         GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
 #define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
   grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
 void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@@ -214,10 +224,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
   grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
 void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
                                                  int x);
-extern const int grpc_stats_histo_buckets[10];
-extern const int grpc_stats_histo_start[10];
-extern const int *const grpc_stats_histo_bucket_boundaries[10];
-extern void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx,
+#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
+  grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
+void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x);
+extern const int grpc_stats_histo_buckets[11];
+extern const int grpc_stats_histo_start[11];
+extern const int *const grpc_stats_histo_bucket_boundaries[11];
+extern void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx,
                                                   int x);
 #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
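The _FIRST_SLOT/_BUCKETS constants place each histogram inside one flat array of GRPC_STATS_HISTOGRAM_BUCKETS (now 648) bucket slots; the new histogram occupies slots 640..647. A minimal sketch of reading one histogram's slice back, assuming a caller-provided snapshot array rather than the real collection API:

#include <stdio.h>

/* Constants copied from the header above; the snapshot array is a
   hypothetical stand-in for collected per-bucket counts. */
#define GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT 640
#define GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS 8
#define GRPC_STATS_HISTOGRAM_BUCKETS 648

int main(void) {
  long snapshot[GRPC_STATS_HISTOGRAM_BUCKETS] = {0}; /* all histograms, flat */
  const long *cqs_checked =
      snapshot + GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT;
  for (int i = 0; i < GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS; ++i) {
    printf("server_cqs_checked bucket %d: %ld\n", i, cqs_checked[i]);
  }
  return 0;
}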

src/core/lib/debug/stats_data.yaml:

@@ -135,3 +135,14 @@
 - counter: executor_push_retries
   doc: Number of times we raced and were forced to retry pushing a closure to
        the executor
+# server
+- counter: server_requested_calls
+  doc: How many calls were requested (not necessarily received) by the server
+- histogram: server_cqs_checked
+  buckets: 8
+  max: 64
+  doc: How many completion queues were checked looking for a CQ that had
+       requested the incoming call
+- counter: server_slowpath_requests_queued
+  doc: How many times was the server slow path taken (indicates too few
+       outstanding requests)
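
stats_data.c and stats_data.h above are evidently generated from this YAML. The boundaries in grpc_stats_table_4 (0, 1, 2, 4, 7, 13, 23, 39, 64) follow from buckets: 8 and max: 64 by roughly geometric spacing; the sketch below re-derives them. It is an inference from the emitted table, not the repository's actual generator script.

#include <math.h>
#include <stdio.h>

int main(void) {
  const int buckets = 8;
  const double max = 64.0;
  int bounds[9];
  int n = 0;
  bounds[n++] = 0;
  bounds[n++] = 1;
  while (n < buckets + 1) {
    int next;
    if (n == buckets) {
      next = (int)max; /* last boundary is the declared max */
    } else {
      /* spread the remaining boundaries geometrically toward max */
      double mul = pow(max / bounds[n - 1], 1.0 / (buckets + 1 - n));
      next = (int)ceil(bounds[n - 1] * mul);
    }
    if (next <= bounds[n - 1] + 1) {
      next = bounds[n - 1] + 1; /* keep boundaries strictly increasing */
    }
    bounds[n++] = next;
  }
  for (int i = 0; i <= buckets; ++i) printf("%d ", bounds[i]);
  printf("\n"); /* prints: 0 1 2 4 7 13 23 39 64 (== grpc_stats_table_4) */
  return 0;
}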

src/core/lib/surface/server.c:

@@ -29,6 +29,7 @@
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/connected_channel.h"
+#include "src/core/lib/debug/stats.h"
 #include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/iomgr.h"
 #include "src/core/lib/slice/slice_internal.h"
@@ -540,6 +541,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
     if (request_id == -1) {
       continue;
     } else {
+      GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
       gpr_mu_lock(&calld->mu_state);
       calld->state = ACTIVATED;
       gpr_mu_unlock(&calld->mu_state);
@@ -550,6 +552,7 @@ static void publish_new_rpc(grpc_exec_ctx *exec_ctx, void *arg,
   }
   /* no cq to take the request found: queue it on the slow list */
+  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
   gpr_mu_lock(&server->mu_call);
   gpr_mu_lock(&calld->mu_state);
   calld->state = PENDING;
@@ -1432,6 +1435,7 @@ grpc_call_error grpc_server_request_call(
   grpc_call_error error;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
+  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
   GRPC_API_TRACE(
       "grpc_server_request_call("
       "server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1478,6 +1482,7 @@ grpc_call_error grpc_server_request_registered_call(
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   requested_call *rc = (requested_call *)gpr_malloc(sizeof(*rc));
   registered_method *rm = (registered_method *)rmp;
+  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
   GRPC_API_TRACE(
       "grpc_server_request_registered_call("
       "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
