Instrument executor a little better

pull/12580/head
Craig Tiller 8 years ago
parent e29d497fa9
commit f66ed28853
  1. src/core/lib/debug/stats_data.c (48 changed lines)
  2. src/core/lib/debug/stats_data.h (24 changed lines)
  3. src/core/lib/debug/stats_data.yaml (8 changed lines)
  4. src/core/lib/iomgr/executor.c (9 changed lines)

src/core/lib/debug/stats_data.c
@@ -56,6 +56,8 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
"executor_wakeup_initiated",
"executor_queue_drained",
"executor_push_retries",
"executor_threads_created",
"executor_threads_used",
};
const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
@@ -98,6 +100,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of times an executor queue was drained",
"Number of times we raced and were forced to retry pushing a closure to "
"the executor",
"Size of the backing thread pool for overflow gRPC Core work",
"How many executor threads actually got used",
};
const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"tcp_write_size",
@@ -110,6 +114,7 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = {
"http2_send_message_per_write",
"http2_send_trailing_metadata_per_write",
"http2_send_flowctl_per_write",
"executor_closures_per_wakeup",
};
const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of bytes offered to each syscall_write",
@@ -122,6 +127,7 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = {
"Number of streams whose payload was written per TCP write",
"Number of streams terminated per TCP write",
"Number of flow control updates written per TCP write",
"Number of closures executed each time an executor wakes up",
};
const int grpc_stats_table_0[65] = {
0, 1, 2, 3, 4, 6, 8, 11,
@@ -418,16 +424,43 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
const int grpc_stats_histo_buckets[10] = {64, 64, 64, 64, 64,
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int value) {
value = GPR_CLAMP(value, 0, 1024);
if (value < 13) {
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value);
return;
}
union {
double dbl;
uint64_t uint;
} _val, _bkt;
_val.dbl = value;
if (_val.uint < 4637863191261478912ull) {
int bucket =
grpc_stats_table_3[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
_bkt.dbl = grpc_stats_table_2[bucket];
bucket -= (_val.uint < _bkt.uint);
GRPC_STATS_INC_HISTOGRAM(
(exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket);
return;
}
GRPC_STATS_INC_HISTOGRAM((exec_ctx),
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
grpc_stats_histo_find_bucket_slow(
(exec_ctx), value, grpc_stats_table_2, 64));
}
const int grpc_stats_histo_buckets[11] = {64, 64, 64, 64, 64, 64,
64, 64, 64, 64, 64};
const int grpc_stats_histo_start[10] = {0, 64, 128, 192, 256,
320, 384, 448, 512, 576};
const int *const grpc_stats_histo_bucket_boundaries[10] = {
const int grpc_stats_histo_start[11] = {0, 64, 128, 192, 256, 320,
384, 448, 512, 576, 640};
const int *const grpc_stats_histo_bucket_boundaries[11] = {
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_0,
grpc_stats_table_2, grpc_stats_table_2, grpc_stats_table_2,
grpc_stats_table_2};
void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_table_2, grpc_stats_table_2};
void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_tcp_write_size,
grpc_stats_inc_tcp_write_iov_size,
grpc_stats_inc_tcp_read_size,
@@ -437,4 +470,5 @@ void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx, int x) = {
grpc_stats_inc_http2_send_initial_metadata_per_write,
grpc_stats_inc_http2_send_message_per_write,
grpc_stats_inc_http2_send_trailing_metadata_per_write,
grpc_stats_inc_http2_send_flowctl_per_write};
grpc_stats_inc_http2_send_flowctl_per_write,
grpc_stats_inc_executor_closures_per_wakeup};
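
A note on the generated incrementer above: grpc_stats_inc_executor_closures_per_wakeup clamps the sample to [0, 1024], maps small values (< 13) straight to their own bucket, and for larger values avoids a per-sample search by reinterpreting the sample as an IEEE-754 double, whose bit pattern orders the same way as the value itself for non-negative numbers; the top bits (exponent plus a few leading mantissa bits) index grpc_stats_table_3, one comparison against the candidate boundary corrects the result, and grpc_stats_histo_find_bucket_slow remains as the fallback. A minimal standalone sketch of that ordering trick (illustrative only, not gRPC code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* For d >= 0, the IEEE-754 bit pattern of a double, read as a uint64_t,
 * increases monotonically with d, so coarse bucketing can be done with
 * integer compares and shifts instead of floating-point math. */
static uint64_t double_bits(double d) {
  uint64_t u;
  memcpy(&u, &d, sizeof(u)); /* well-defined type pun */
  return u;
}

int main(void) {
  printf("%d\n", double_bits(13.0) < double_bits(14.0));  /* prints 1 */
  printf("%d\n", double_bits(99.0) < double_bits(300.0)); /* prints 1 */
  /* The generated code keeps only the top 16 bits (sign, exponent, and
   * the leading mantissa bits) as a table index, analogous to: */
  printf("coarse index for 300: %u\n", (unsigned)(double_bits(300.0) >> 48));
  return 0;
}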

src/core/lib/debug/stats_data.h
@@ -58,6 +58,8 @@ typedef enum {
GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED,
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED,
GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED,
GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED,
GRPC_STATS_COUNTER_COUNT
} grpc_stats_counters;
extern const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT];
@@ -73,6 +75,7 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP,
GRPC_STATS_HISTOGRAM_COUNT
} grpc_stats_histograms;
extern const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT];
@@ -98,7 +101,9 @@ typedef enum {
GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 576,
GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_BUCKETS = 640
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 640,
GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64,
GRPC_STATS_HISTOGRAM_BUCKETS = 704
} grpc_stats_histogram_constants;
#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
@@ -179,6 +184,11 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), \
GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED)
#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED)
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int x);
@@ -214,10 +224,14 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write(
grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx,
int x);
extern const int grpc_stats_histo_buckets[10];
extern const int grpc_stats_histo_start[10];
extern const int *const grpc_stats_histo_bucket_boundaries[10];
extern void (*const grpc_stats_inc_histogram[10])(grpc_exec_ctx *exec_ctx,
#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \
grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value))
void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx,
int x);
extern const int grpc_stats_histo_buckets[11];
extern const int grpc_stats_histo_start[11];
extern const int *const grpc_stats_histo_bucket_boundaries[11];
extern void (*const grpc_stats_inc_histogram[11])(grpc_exec_ctx *exec_ctx,
int x);
#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
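
For context on how these declarations get consumed elsewhere in core, here is a hedged usage sketch. It assumes the grpc_stats_data snapshot struct and grpc_stats_collect() helper declared in src/core/lib/debug/stats.h (neither is part of this diff), and the function name is illustrative:

#include <inttypes.h>
#include <grpc/support/log.h>
#include "src/core/lib/debug/stats.h"

/* Illustrative only: bump the new counters through the macros above, then
 * read the process-wide totals back out of a stats snapshot (snapshot field
 * and collect() signature are assumed from stats.h, not shown in this diff). */
static void log_executor_thread_stats(grpc_exec_ctx *exec_ctx) {
  GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx);
  GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx);

  grpc_stats_data snapshot;
  grpc_stats_collect(&snapshot);
  gpr_log(GPR_INFO, "executor threads created: %" PRIdPTR ", used: %" PRIdPTR,
          snapshot.counters[GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED],
          snapshot.counters[GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED]);
}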

src/core/lib/debug/stats_data.yaml
@@ -135,3 +135,11 @@
- counter: executor_push_retries
doc: Number of times we raced and were forced to retry pushing a closure to
the executor
- counter: executor_threads_created
doc: Size of the backing thread pool for overflow gRPC Core work
- counter: executor_threads_used
doc: How many executor threads actually got used
- histogram: executor_closures_per_wakeup
max: 1024
buckets: 64
doc: Number of closures executed each time an executor wakes up
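
The stats_data.c and stats_data.h hunks above are generated output; this yaml file is the hand-edited source of truth, presumably regenerated into the C artifacts by the stats codegen tooling rather than edited by hand. Each entry fans out into a fixed set of generated pieces; the mapping, collected from the hunks above, looks like this:

/* stats_data.yaml entry                 generated artifact (from the hunks above)
 *
 * - counter: executor_threads_used   -> GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED (enum)
 *                                       GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) (macro)
 *                                       "executor_threads_used" + doc string (name/doc tables)
 *
 * - histogram: executor_closures_per_wakeup
 *   max: 1024                        -> GPR_CLAMP(value, 0, 1024) in the incrementer
 *   buckets: 64                      -> 64 more slots; GRPC_STATS_HISTOGRAM_BUCKETS 640 -> 704
 */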

src/core/lib/iomgr/executor.c
@@ -81,6 +81,8 @@ static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
grpc_exec_ctx_flush(exec_ctx);
}
GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n);
return n;
}
@@ -150,7 +152,10 @@ static void executor_thread(void *arg) {
grpc_exec_ctx exec_ctx =
GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);
GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx);
size_t subtract_depth = 0;
bool used = false;
for (;;) {
if (GRPC_TRACER_ON(executor_trace)) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
@@ -170,6 +175,10 @@
gpr_mu_unlock(&ts->mu);
break;
}
if (!used) {
GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx);
used = true;
}
GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
grpc_closure_list exec = ts->elems;
ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
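
The executor change itself is small: every worker bumps executor_threads_created on startup and bumps executor_threads_used only the first time it actually dequeues work, so comparing the two counters shows how much of the backing pool a workload really touches, while run_closures reports the drained list length to the new per-wakeup histogram. A simplified sketch of that pattern (illustrative only; names and types are not the real executor's):

#include <stdbool.h>

typedef struct {
  long threads_created;
  long threads_used;
} executor_stats;

/* Worker loop skeleton: count creation once, count "used" only on the
 * first successful dequeue, then keep draining until shutdown. */
static void worker_loop(executor_stats *stats, bool (*dequeue_work)(void)) {
  ++stats->threads_created;  /* GRPC_STATS_INC_EXECUTOR_THREADS_CREATED */
  bool used = false;
  while (dequeue_work()) {   /* returns false once the executor shuts down */
    if (!used) {
      ++stats->threads_used; /* GRPC_STATS_INC_EXECUTOR_THREADS_USED */
      used = true;
    }
    /* ...run the drained closure list; its length is what feeds
     * GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP in run_closures... */
  }
}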
