Expose histograms via microbenchmarks

pull/12357/head
Craig Tiller 8 years ago
parent 520d76d698
commit 5489d41c15
Changed files:
  src/core/lib/debug/stats.c (67)
  src/core/lib/debug/stats.h (8)
  src/core/lib/debug/stats_data.h (60)
  src/core/lib/debug/stats_data.yaml (23)
  test/cpp/microbenchmarks/helpers.cc (13)
  tools/codegen/core/gen_stats_data.py (18)

@@ -52,6 +52,16 @@ void grpc_stats_collect(grpc_stats_data *output) {
}
}
void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
grpc_stats_data *c) {
for (size_t i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
c->counters[i] = b->counters[i] - a->counters[i];
}
for (size_t i = 0; i < GRPC_STATS_HISTOGRAM_BUCKETS; i++) {
c->histograms[i] = b->histograms[i] - a->histograms[i];
}
}
int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value,
const double *table, int table_size) {
GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
@@ -72,6 +82,63 @@ int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value,
return a;
}
size_t grpc_stats_histo_count(const grpc_stats_data *stats,
grpc_stats_histograms histogram) {
size_t sum = 0;
for (int i = 0; i < grpc_stats_histo_buckets[histogram]; i++) {
sum += (size_t)stats->histograms[grpc_stats_histo_start[histogram] + i];
}
return sum;
}
static double threshold_for_count_below(const gpr_atm *bucket_counts,
const double *bucket_boundaries,
int num_buckets, double count_below) {
double count_so_far;
double lower_bound;
double upper_bound;
int lower_idx;
int upper_idx;
/* find the lowest bucket that gets us above count_below */
count_so_far = 0.0;
for (lower_idx = 0; lower_idx < num_buckets; lower_idx++) {
count_so_far += (double)bucket_counts[lower_idx];
if (count_so_far >= count_below) {
break;
}
}
if (count_so_far == count_below) {
/* this bucket hits the threshold exactly... we should be midway through
any run of zero values following the bucket */
for (upper_idx = lower_idx + 1; upper_idx < num_buckets; upper_idx++) {
if (bucket_counts[upper_idx]) {
break;
}
}
return (bucket_boundaries[lower_idx] + bucket_boundaries[upper_idx]) / 2.0;
} else {
/* treat values as uniform throughout the bucket, and find where this value
should lie */
lower_bound = bucket_boundaries[lower_idx];
upper_bound = bucket_boundaries[lower_idx + 1];
return upper_bound -
(upper_bound - lower_bound) * (count_so_far - count_below) /
(double)bucket_counts[lower_idx];
}
}
double grpc_stats_histo_percentile(const grpc_stats_data *stats,
grpc_stats_histograms histogram,
double percentile) {
size_t count = grpc_stats_histo_count(stats, histogram);
if (count == 0) return 0.0;
return threshold_for_count_below(
stats->histograms + grpc_stats_histo_start[histogram],
grpc_stats_histo_bucket_boundaries[histogram],
grpc_stats_histo_buckets[histogram], (double)count * percentile / 100.0);
}
char *grpc_stats_data_as_json(const grpc_stats_data *data) {
gpr_strvec v;
char *tmp;
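
For reference, a minimal standalone sketch (not part of this change) of how the interpolation in threshold_for_count_below behaves. The bucket boundaries and counts below are made-up numbers; the loop and the final formula mirror the code above.

#include <stdio.h>

int main(void) {
  /* Four buckets [0,10) [10,20) [20,40) [40,80) holding 2, 6, 1, 1 samples. */
  const double boundaries[] = {0, 10, 20, 40, 80};
  const double counts[] = {2, 6, 1, 1};
  const double total = 2 + 6 + 1 + 1;
  const double count_below = total * 50.0 / 100.0; /* median: 5 samples */

  /* Find the lowest bucket whose cumulative count reaches count_below. */
  double count_so_far = 0.0;
  int lower_idx;
  for (lower_idx = 0; lower_idx < 4; lower_idx++) {
    count_so_far += counts[lower_idx];
    if (count_so_far >= count_below) break;
  }

  /* Here lower_idx == 1 (bucket [10,20), cumulative count 8): treat samples
     as uniform within the bucket and interpolate, as the code above does. */
  const double lower = boundaries[lower_idx];
  const double upper = boundaries[lower_idx + 1];
  const double median =
      upper -
      (upper - lower) * (count_so_far - count_below) / counts[lower_idx];
  printf("estimated median: %g\n", median); /* 20 - 10 * 3 / 6 = 15 */
  return 0;
}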

@@ -46,8 +46,16 @@ extern grpc_stats_data *grpc_stats_per_cpu_storage;
void grpc_stats_init(void);
void grpc_stats_shutdown(void);
void grpc_stats_collect(grpc_stats_data *output);
// c = b-a
void grpc_stats_diff(const grpc_stats_data *b, const grpc_stats_data *a,
grpc_stats_data *c);
char *grpc_stats_data_as_json(const grpc_stats_data *data);
int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx *exec_ctx, double value,
const double *table, int table_size);
double grpc_stats_histo_percentile(const grpc_stats_data *data,
grpc_stats_histograms histogram,
double percentile);
size_t grpc_stats_histo_count(const grpc_stats_data *data,
grpc_stats_histograms histogram);
#endif
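
A hedged usage sketch of the API declared above, roughly what a benchmark harness does with it. GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE comes from the generated stats_data.h (assumed to be pulled in via stats.h), and library init plus the measured workload are assumed to happen elsewhere.

#include <grpc/support/log.h>

#include "src/core/lib/debug/stats.h"

static void report_tcp_write_size_percentiles(void) {
  grpc_stats_data before, after, delta;
  grpc_stats_collect(&before);
  /* ... run the workload being measured ... */
  grpc_stats_collect(&after);
  grpc_stats_diff(&after, &before, &delta); /* delta = after - before */
  double p50 = grpc_stats_histo_percentile(
      &delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 50.0);
  double p99 = grpc_stats_histo_percentile(
      &delta, GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, 99.0);
  gpr_log(GPR_INFO, "tcp_write_size: median=%f 99p=%f", p50, p99);
}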

@@ -66,64 +66,72 @@ typedef enum {
GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
do { \
double _hist_val = (double)(value); \
if (_hist_val < 0) _hist_val = 0; \
uint64_t _hist_idx = *(uint64_t *)&_hist_val; \
if (_hist_val < 5.000000) { \
union { \
double dbl; \
uint64_t uint; \
} _val; \
_val.dbl = (double)(value); \
if (_val.dbl < 0) _val.dbl = 0; \
if (_val.dbl < 5.000000) { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_hist_val); \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, (int)_val.dbl); \
} else { \
if (_hist_idx < 4715268809856909312ull) { \
if (_val.uint < 4715268809856909312ull) { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \
grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \
} else { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, \
grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \
grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \
grpc_stats_table_0, 64)); \
} \
} \
} while (false)
#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
do { \
double _hist_val = (double)(value); \
if (_hist_val < 0) _hist_val = 0; \
uint64_t _hist_idx = *(uint64_t *)&_hist_val; \
if (_hist_val < 12.000000) { \
GRPC_STATS_INC_HISTOGRAM((exec_ctx), \
GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \
(int)_hist_val); \
union { \
double dbl; \
uint64_t uint; \
} _val; \
_val.dbl = (double)(value); \
if (_val.dbl < 0) _val.dbl = 0; \
if (_val.dbl < 12.000000) { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, (int)_val.dbl); \
} else { \
if (_hist_idx < 4652218415073722368ull) { \
if (_val.uint < 4652218415073722368ull) { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \
grpc_stats_table_3[((_hist_idx - 4622945017495814144ull) >> 49)]); \
grpc_stats_table_3[((_val.uint - 4622945017495814144ull) >> 49)]); \
} else { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, \
grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \
grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \
grpc_stats_table_2, 64)); \
} \
} \
} while (false)
#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
do { \
double _hist_val = (double)(value); \
if (_hist_val < 0) _hist_val = 0; \
uint64_t _hist_idx = *(uint64_t *)&_hist_val; \
if (_hist_val < 5.000000) { \
union { \
double dbl; \
uint64_t uint; \
} _val; \
_val.dbl = (double)(value); \
if (_val.dbl < 0) _val.dbl = 0; \
if (_val.dbl < 5.000000) { \
GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \
(int)_hist_val); \
(int)_val.dbl); \
} else { \
if (_hist_idx < 4715268809856909312ull) { \
if (_val.uint < 4715268809856909312ull) { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \
grpc_stats_table_1[((_hist_idx - 4617315517961601024ull) >> 50)]); \
grpc_stats_table_1[((_val.uint - 4617315517961601024ull) >> 50)]); \
} else { \
GRPC_STATS_INC_HISTOGRAM( \
(exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, \
grpc_stats_histo_find_bucket_slow((exec_ctx), (value), \
grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, \
grpc_stats_table_0, 64)); \
} \
} \
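
The generated macros above avoid a full bucket search by exploiting an IEEE-754 property: for non-negative doubles, the raw 64-bit pattern orders the same way as the values themselves, so the macro reinterprets the value's bits, subtracts the bit pattern of the first non-trivial bucket boundary, and shifts off low mantissa bits to index a small precomputed table. A standalone sketch of the idea (the values and the shift amount here are illustrative; the generator picks a shift per histogram):

#include <stdint.h>
#include <stdio.h>

/* Reinterpret a double's bits as a uint64_t, as the macros above do. */
static uint64_t dbl_bits(double d) {
  union {
    double dbl;
    uint64_t uint;
  } v;
  v.dbl = d;
  return v.uint;
}

int main(void) {
  /* Ordering of non-negative doubles is preserved by their bit patterns. */
  printf("%d %d\n", dbl_bits(5.0) < dbl_bits(1024.0),
         dbl_bits(1024.0) < dbl_bits(16777216.0)); /* prints: 1 1 */
  /* Shifting off low mantissa bits collapses nearby values onto the same
     integer, which is what lets a small table map bit patterns to buckets. */
  printf("%d\n", (dbl_bits(1000.0) >> 50) == (dbl_bits(1023.0) >> 50)); /* 1 */
  return 0;
}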

@@ -1,10 +1,19 @@
#Stats data declaration
#use tools / codegen / core / gen_stats_data.py to turn this into stats_data.h
- counter:client_calls_created -
counter:server_calls_created - counter:syscall_write -
counter:syscall_read - counter:syscall_poll - counter:syscall_wait -
counter:histogram_slow_lookups -
histogram:tcp_write_size max:16777216 #16 meg max write tracked buckets:64 -
histogram:tcp_write_iov_size max:1024 buckets:64 -
histogram:tcp_read_size max:16777216 buckets:64
- counter: client_calls_created
- counter: server_calls_created
- counter: syscall_write
- counter: syscall_read
- counter: syscall_poll
- counter: syscall_wait
- counter: histogram_slow_lookups
- histogram: tcp_write_size
max: 16777216 # 16 meg max write tracked
buckets: 64
- histogram: tcp_write_iov_size
max: 1024
buckets: 64
- histogram: tcp_read_size
max: 16777216
buckets: 64
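
Each entry above becomes a counter or histogram in the generated stats_data.h, and values are recorded through the corresponding generated macros. A hedged sketch of what a call site might look like (the actual call sites are not part of this change; the helper name and arguments here are hypothetical, and stats.h is assumed to pull in the generated header):

#include "src/core/lib/debug/stats.h"

/* Hypothetical helper: record the size and iov count of a completed write. */
static void record_tcp_write(grpc_exec_ctx *exec_ctx, size_t nbytes,
                             size_t iov_count) {
  GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, nbytes);
  GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_count);
}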

@@ -31,10 +31,17 @@ void TrackCounters::Finish(benchmark::State &state) {
void TrackCounters::AddToLabel(std::ostream &out, benchmark::State &state) {
grpc_stats_data stats_end;
grpc_stats_collect(&stats_end);
grpc_stats_data stats;
grpc_stats_diff(&stats_end, &stats_begin_, &stats);
for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
out << " " << grpc_stats_counter_name[i] << "/iter:"
<< ((double)(stats_end.counters[i] - stats_begin_.counters[i]) /
(double)state.iterations());
out << " " << grpc_stats_counter_name[i]
<< "/iter:" << ((double)stats.counters[i] / (double)state.iterations());
}
for (int i = 0; i < GRPC_STATS_HISTOGRAM_COUNT; i++) {
out << " " << grpc_stats_histogram_name[i] << "-median:"
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 50.0)
<< " " << grpc_stats_histogram_name[i] << "-99p:"
<< grpc_stats_histo_percentile(&stats, (grpc_stats_histograms)i, 99.0);
}
#ifdef GPR_LOW_LEVEL_COUNTERS
grpc_memory_counters counters_at_end = grpc_memory_counters_snapshot();

@@ -126,27 +126,27 @@ def gen_bucket_code(histogram):
print first_nontrivial, shift_data, bounds
if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
code = 'do {\\\n'
code += 'double _hist_val = (double)(value);\\\n'
code += 'if (_hist_val < 0) _hist_val = 0;\\\n'
code += 'uint64_t _hist_idx = *(uint64_t*)&_hist_val;\\\n'
code += ' union { double dbl; uint64_t uint; } _val;\\\n'
code += '_val.dbl = (double)(value);\\\n'
code += 'if (_val.dbl < 0) _val.dbl = 0;\\\n'
map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
if first_nontrivial is None:
code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n'
code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n'
% histogram.name.upper())
else:
code += 'if (_hist_val < %f) {\\\n' % first_nontrivial
code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_hist_val);\\\n'
code += 'if (_val.dbl < %f) {\\\n' % first_nontrivial
code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, (int)_val.dbl);\\\n'
% histogram.name.upper())
code += '} else {'
first_nontrivial_code = dbl2u64(first_nontrivial)
if shift_data is not None:
map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table))
code += 'if (_hist_idx < %dull) {\\\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
code += 'if (_val.uint < %dull) {\\\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper()
code += 'grpc_stats_table_%d[((_hist_idx - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0])
code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)]);\\\n' % (map_table_idx, first_nontrivial_code, shift_data[0])
code += '} else {\\\n'
code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper()
code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), (value), grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds))
code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), _val.dbl, grpc_stats_table_%d, %d));\\\n' % (bounds_idx, len(bounds))
if shift_data is not None:
code += '}'
code += '}'
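
The magic integer constants that gen_stats_data.py bakes into the macros are just dbl2u64 of the bucket boundaries. A quick standalone check (constants copied from the generated stats_data.h above) against the first non-trivial boundaries of 5.0 and 12.0:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* C-side equivalent of the generator's dbl2u64(): a double's raw bits. */
static uint64_t dbl2u64(double d) {
  uint64_t u;
  memcpy(&u, &d, sizeof(u));
  return u;
}

int main(void) {
  /* 4617315517961601024ull appears in GRPC_STATS_INC_TCP_WRITE_SIZE and
     GRPC_STATS_INC_TCP_READ_SIZE; 4622945017495814144ull appears in
     GRPC_STATS_INC_TCP_WRITE_IOV_SIZE. */
  printf("%d\n", dbl2u64(5.0) == 4617315517961601024ull);  /* prints: 1 */
  printf("%d\n", dbl2u64(12.0) == 4622945017495814144ull); /* prints: 1 */
  return 0;
}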
