Merge pull request #10275 from ctiller/memory

Memory usage tweaks
pull/10309/head
Authored by Craig Tiller 8 years ago; committed via GitHub
commit 7e6b7df8d6
  1. src/core/lib/surface/channel.c (8 lines changed)
  2. test/core/memory_usage/client.c (5 lines changed)

@@ -194,8 +194,14 @@ grpc_channel *grpc_channel_create(grpc_exec_ctx *exec_ctx, const char *target,
/* Returns a rounded-up estimate of the per-call allocation size for this
   channel. Reads the channel's atomically-maintained running estimate
   (no memory barrier: a slightly stale value is acceptable here) and
   rounds it up past the NEXT multiple of ROUND_UP_SIZE. */
size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) {
#define ROUND_UP_SIZE 256
  /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE.
     This ensures:
     1. a consistent size allocation when our estimate is drifting slowly
        (which is common) - which tends to help most allocators reuse memory
     2. a small amount of allowed growth over the estimate without hitting
        the arena size doubling case, reducing overall memory usage */
  return ((size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate) +
          2 * ROUND_UP_SIZE) &
         ~(size_t)(ROUND_UP_SIZE - 1);
}

@@ -237,6 +237,11 @@ int main(int argc, char **argv) {
0, grpc_slice_from_static_string("Reflector/GetAfterSvrCreation")); 0, grpc_slice_from_static_string("Reflector/GetAfterSvrCreation"));
// warmup period // warmup period
for (int i = 0; i < warmup_iterations; i++) {
send_snapshot_request(
0, grpc_slice_from_static_string("Reflector/SimpleSnapshot"));
}
for (call_idx = 0; call_idx < warmup_iterations; ++call_idx) { for (call_idx = 0; call_idx < warmup_iterations; ++call_idx) {
init_ping_pong_request(call_idx + 1); init_ping_pong_request(call_idx + 1);
} }

Loading…
Cancel
Save