Merge pull request #18966 from apolcyn/revert_arena_change

Revert arena change
pull/18970/head
apolcyn authored 6 years ago; committed by GitHub
commit 4e94c5adba
12 changed files:

  1. include/grpc/impl/codegen/port_platform.h (3 changes)
  2. include/grpc/support/alloc.h (2 changes)
  3. src/core/ext/filters/client_channel/subchannel.cc (22 changes)
  4. src/core/lib/channel/channel_stack.cc (43 changes)
  5. src/core/lib/gpr/alloc.cc (73 changes)
  6. src/core/lib/gpr/alloc.h (10 changes)
  7. src/core/lib/gprpp/arena.cc (8 changes)
  8. src/core/lib/gprpp/arena.h (6 changes)
  9. src/core/lib/surface/call.cc (6 changes)
 10. src/core/lib/transport/transport.cc (2 changes)
 11. test/core/gpr/alloc_test.cc (18 changes)
 12. test/core/util/memory_counters.cc (26 changes)

--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -77,7 +77,6 @@
 #define GPR_WINDOWS 1
 #define GPR_WINDOWS_SUBPROCESS 1
 #define GPR_WINDOWS_ENV
-#define GPR_HAS_ALIGNED_MALLOC 1
 #ifdef __MSYS__
 #define GPR_GETPID_IN_UNISTD_H 1
 #define GPR_MSYS_TMPFILE
@@ -174,7 +173,6 @@
 #define GPR_POSIX_SYNC 1
 #define GPR_POSIX_TIME 1
 #define GPR_HAS_PTHREAD_H 1
-#define GPR_HAS_ALIGNED_ALLOC 1
 #define GPR_GETPID_IN_UNISTD_H 1
 #ifdef _LP64
 #define GPR_ARCH_64 1
@@ -240,7 +238,6 @@
 #define GPR_POSIX_SUBPROCESS 1
 #define GPR_POSIX_SYNC 1
 #define GPR_POSIX_TIME 1
-#define GPR_HAS_POSIX_MEMALIGN 1
 #define GPR_HAS_PTHREAD_H 1
 #define GPR_GETPID_IN_UNISTD_H 1
 #ifndef GRPC_CFSTREAM

--- a/include/grpc/support/alloc.h
+++ b/include/grpc/support/alloc.h
@@ -32,8 +32,6 @@ typedef struct gpr_allocation_functions {
   void* (*zalloc_fn)(size_t size); /** if NULL, uses malloc_fn then memset */
   void* (*realloc_fn)(void* ptr, size_t size);
   void (*free_fn)(void* ptr);
-  void* (*aligned_alloc_fn)(size_t size, size_t alignment);
-  void (*aligned_free_fn)(void* ptr);
 } gpr_allocation_functions;

 /** malloc.
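After this revert, gpr_allocation_functions is back to its four original members, and custom allocators no longer supply aligned hooks. A minimal sketch of overriding the hooks through the public API (the counting wrappers are hypothetical, not part of this change):

    #include <grpc/support/alloc.h>
    #include <stdlib.h>

    // Hypothetical wrappers, for illustration only.
    static size_t g_alloc_count = 0;
    static void* counting_malloc(size_t size) {
      ++g_alloc_count;
      return malloc(size);
    }
    static void* counting_realloc(void* p, size_t size) { return realloc(p, size); }
    static void counting_free(void* p) { free(p); }

    void install_counting_allocator() {
      // Four members, matching the reverted struct; a NULL zalloc_fn falls
      // back to malloc_fn followed by memset, per the header comment.
      gpr_allocation_functions fns = {counting_malloc, nullptr, counting_realloc,
                                      counting_free};
      gpr_set_allocation_functions(fns);
    }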

--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -66,13 +66,12 @@
 #define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2

 // Conversion between subchannel call and call stack.
-#define SUBCHANNEL_CALL_TO_CALL_STACK(call)                                  \
-  (grpc_call_stack*)((char*)(call) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(    \
-                                         sizeof(SubchannelCall)))
-#define CALL_STACK_TO_SUBCHANNEL_CALL(callstack)         \
-  (SubchannelCall*)(((char*)(call_stack)) -              \
-                    GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
-                        sizeof(SubchannelCall)))
+#define SUBCHANNEL_CALL_TO_CALL_STACK(call) \
+  (grpc_call_stack*)((char*)(call) +        \
+                     GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))
+#define CALL_STACK_TO_SUBCHANNEL_CALL(callstack) \
+  (SubchannelCall*)(((char*)(call_stack)) -      \
+                    GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)))

 namespace grpc_core {
@@ -152,10 +151,10 @@ RefCountedPtr<SubchannelCall> ConnectedSubchannel::CreateCall(
 size_t ConnectedSubchannel::GetInitialCallSizeEstimate(
     size_t parent_data_size) const {
   size_t allocation_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(SubchannelCall));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall));
   if (parent_data_size > 0) {
     allocation_size +=
-        GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(channel_stack_->call_stack_size) +
         parent_data_size;
   } else {
     allocation_size += channel_stack_->call_stack_size;
@@ -179,9 +178,8 @@ void SubchannelCall::StartTransportStreamOpBatch(
 void* SubchannelCall::GetParentData() {
   grpc_channel_stack* chanstk = connected_subchannel_->channel_stack();
-  return (char*)this +
-         GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
-         GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(chanstk->call_stack_size);
+  return (char*)this + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(SubchannelCall)) +
+         GPR_ROUND_UP_TO_ALIGNMENT_SIZE(chanstk->call_stack_size);
 }

 grpc_call_stack* SubchannelCall::GetCallStack() {
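Both spellings of the macro implement the same single-allocation layout: the SubchannelCall object, the call stack, and any parent data share one block, with each offset rounded up so the next region stays aligned. A worked sketch of the pointer arithmetic (the sizes are hypothetical; GPR_MAX_ALIGNMENT is assumed to be 16):

    // Hypothetical sizes: sizeof(SubchannelCall) == 40, call_stack_size == 200.
    //   round_up(40) -> 48, round_up(200) -> 208
    //
    // One allocation:  [SubchannelCall|pad][grpc_call_stack....|pad][parent data]
    //                   offset 0            offset 48               offset 256
    //
    // SUBCHANNEL_CALL_TO_CALL_STACK(call) == (char*)call + 48
    // SubchannelCall::GetParentData()     == (char*)call + 48 + 208 == call + 256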

--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -47,9 +47,9 @@ grpc_core::TraceFlag grpc_trace_channel(false, "channel");
 size_t grpc_channel_stack_size(const grpc_channel_filter** filters,
                                size_t filter_count) {
   /* always need the header, and size for the channel elements */
-  size_t size = GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
-                GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(
-                    filter_count * sizeof(grpc_channel_element));
+  size_t size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) +
+                GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count *
+                                               sizeof(grpc_channel_element));
   size_t i;

   GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 &&
@@ -57,18 +57,18 @@ size_t grpc_channel_stack_size(const grpc_channel_filter** filters,
   /* add the size for each filter */
   for (i = 0; i < filter_count; i++) {
-    size += GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
+    size += GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
   }

   return size;
 }

-#define CHANNEL_ELEMS_FROM_STACK(stk)                                         \
-  ((grpc_channel_element*)((char*)(stk) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
-                                              sizeof(grpc_channel_stack))))
+#define CHANNEL_ELEMS_FROM_STACK(stk)                                     \
+  ((grpc_channel_element*)((char*)(stk) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
+                                              sizeof(grpc_channel_stack))))

-#define CALL_ELEMS_FROM_STACK(stk)                                         \
-  ((grpc_call_element*)((char*)(stk) + GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE( \
-                                           sizeof(grpc_call_stack))))
+#define CALL_ELEMS_FROM_STACK(stk)                                     \
+  ((grpc_call_element*)((char*)(stk) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE( \
+                                           sizeof(grpc_call_stack))))

 grpc_channel_element* grpc_channel_stack_element(
@@ -92,9 +92,8 @@ grpc_error* grpc_channel_stack_init(
     const grpc_channel_args* channel_args, grpc_transport* optional_transport,
     const char* name, grpc_channel_stack* stack) {
   size_t call_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filter_count *
-                                         sizeof(grpc_call_element));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
   grpc_channel_element* elems;
   grpc_channel_element_args args;
   char* user_data;
@@ -105,8 +104,8 @@ grpc_error* grpc_channel_stack_init(
                                name);
   elems = CHANNEL_ELEMS_FROM_STACK(stack);
   user_data = (reinterpret_cast<char*>(elems)) +
-              GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filter_count *
-                                                 sizeof(grpc_channel_element));
+              GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filter_count *
+                                             sizeof(grpc_channel_element));

   /* init per-filter data */
   grpc_error* first_error = GRPC_ERROR_NONE;
@@ -127,9 +126,8 @@ grpc_error* grpc_channel_stack_init(
       }
     }
     user_data +=
-        GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
-    call_size +=
-        GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
+    call_size += GPR_ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
   }

   GPR_ASSERT(user_data > (char*)stack);
@@ -164,9 +162,8 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
   GRPC_STREAM_REF_INIT(&elem_args->call_stack->refcount, initial_refs, destroy,
                        destroy_arg, "CALL_STACK");
   call_elems = CALL_ELEMS_FROM_STACK(elem_args->call_stack);
-  user_data =
-      (reinterpret_cast<char*>(call_elems)) +
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
+  user_data = (reinterpret_cast<char*>(call_elems)) +
+              GPR_ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));

   /* init per-filter data */
   grpc_error* first_error = GRPC_ERROR_NONE;
@@ -174,8 +171,8 @@ grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
     call_elems[i].filter = channel_elems[i].filter;
     call_elems[i].channel_data = channel_elems[i].channel_data;
     call_elems[i].call_data = user_data;
-    user_data += GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(
-        call_elems[i].filter->sizeof_call_data);
+    user_data +=
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
   }

   for (size_t i = 0; i < count; i++) {
     grpc_error* error =
@@ -245,11 +242,11 @@ grpc_channel_stack* grpc_channel_stack_from_top_element(
     grpc_channel_element* elem) {
   return reinterpret_cast<grpc_channel_stack*>(
       reinterpret_cast<char*>(elem) -
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)));
 }

 grpc_call_stack* grpc_call_stack_from_top_element(grpc_call_element* elem) {
   return reinterpret_cast<grpc_call_stack*>(
       reinterpret_cast<char*>(elem) -
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call_stack)));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)));
 }
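The change in this file is purely a spelling swap; the layout math is untouched. For readers new to it, the channel stack is sized as one contiguous block: header, then the element array, then per-filter channel data, each offset rounded to GPR_MAX_ALIGNMENT. A worked sketch with hypothetical numbers:

    // Assume GPR_MAX_ALIGNMENT == 16 and two hypothetical filters whose
    // sizeof_channel_data are 24 and 40 bytes:
    //   header:    round_up(sizeof(grpc_channel_stack))
    //   elements:  round_up(2 * sizeof(grpc_channel_element))
    //   filter 0:  round_up(24) == 32
    //   filter 1:  round_up(40) == 48
    // grpc_channel_stack_size() returns the sum; CHANNEL_ELEMS_FROM_STACK()
    // points just past the rounded header, and each element's channel_data is
    // carved out of the trailing region in filter order.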

--- a/src/core/lib/gpr/alloc.cc
+++ b/src/core/lib/gpr/alloc.cc
@@ -23,7 +23,6 @@
 #include <grpc/support/log.h>
 #include <stdlib.h>
 #include <string.h>
-#include "src/core/lib/gpr/alloc.h"
 #include "src/core/lib/profiling/timers.h"

 static void* zalloc_with_calloc(size_t sz) { return calloc(sz, 1); }
@@ -34,56 +33,8 @@ static void* zalloc_with_gpr_malloc(size_t sz) {
   return p;
 }

-#ifndef NDEBUG
-static constexpr bool is_power_of_two(size_t value) {
-  // 2^N = 100000...000
-  // 2^N - 1 = 011111...111
-  // (2^N) && ((2^N)-1)) = 0
-  return (value & (value - 1)) == 0;
-}
-#endif
-
-static void* platform_malloc_aligned(size_t size, size_t alignment) {
-#if defined(GPR_HAS_ALIGNED_ALLOC)
-  GPR_DEBUG_ASSERT(is_power_of_two(alignment));
-  size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size, alignment);
-  void* ret = aligned_alloc(alignment, size);
-  GPR_ASSERT(ret != nullptr);
-  return ret;
-#elif defined(GPR_HAS_ALIGNED_MALLOC)
-  GPR_DEBUG_ASSERT(is_power_of_two(alignment));
-  void* ret = _aligned_malloc(size, alignment);
-  GPR_ASSERT(ret != nullptr);
-  return ret;
-#elif defined(GPR_HAS_POSIX_MEMALIGN)
-  GPR_DEBUG_ASSERT(is_power_of_two(alignment));
-  GPR_DEBUG_ASSERT(alignment % sizeof(void*) == 0);
-  void* ret = nullptr;
-  GPR_ASSERT(posix_memalign(&ret, alignment, size) == 0);
-  return ret;
-#else
-  GPR_DEBUG_ASSERT(is_power_of_two(alignment));
-  size_t extra = alignment - 1 + sizeof(void*);
-  void* p = gpr_malloc(size + extra);
-  void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1));
-  ret[-1] = p;
-  return (void*)ret;
-#endif
-}
-
-static void platform_free_aligned(void* ptr) {
-#if defined(GPR_HAS_ALIGNED_ALLOC) || defined(GPR_HAS_POSIX_MEMALIGN)
-  free(ptr);
-#elif defined(GPR_HAS_ALIGNED_MALLOC)
-  _aligned_free(ptr);
-#else
-  gpr_free((static_cast<void**>(ptr))[-1]);
-#endif
-}
-
-static gpr_allocation_functions g_alloc_functions = {
-    malloc, zalloc_with_calloc, realloc,
-    free, platform_malloc_aligned, platform_free_aligned};
+static gpr_allocation_functions g_alloc_functions = {malloc, zalloc_with_calloc,
+                                                     realloc, free};

 gpr_allocation_functions gpr_get_allocation_functions() {
   return g_alloc_functions;
@@ -96,12 +47,6 @@ void gpr_set_allocation_functions(gpr_allocation_functions functions) {
   if (functions.zalloc_fn == nullptr) {
     functions.zalloc_fn = zalloc_with_gpr_malloc;
   }
-  GPR_ASSERT((functions.aligned_alloc_fn == nullptr) ==
-             (functions.aligned_free_fn == nullptr));
-  if (functions.aligned_alloc_fn == nullptr) {
-    functions.aligned_alloc_fn = platform_malloc_aligned;
-    functions.aligned_free_fn = platform_free_aligned;
-  }
   g_alloc_functions = functions;
 }
@@ -143,12 +88,12 @@ void* gpr_realloc(void* p, size_t size) {
 }

 void* gpr_malloc_aligned(size_t size, size_t alignment) {
-  GPR_TIMER_SCOPE("gpr_malloc_aligned", 0);
-  if (size == 0) return nullptr;
-  return g_alloc_functions.aligned_alloc_fn(size, alignment);
+  GPR_ASSERT(((alignment - 1) & alignment) == 0);  // Must be power of 2.
+  size_t extra = alignment - 1 + sizeof(void*);
+  void* p = gpr_malloc(size + extra);
+  void** ret = (void**)(((uintptr_t)p + extra) & ~(alignment - 1));
+  ret[-1] = p;
+  return (void*)ret;
 }

-void gpr_free_aligned(void* ptr) {
-  GPR_TIMER_SCOPE("gpr_free_aligned", 0);
-  g_alloc_functions.aligned_free_fn(ptr);
-}
+void gpr_free_aligned(void* ptr) { gpr_free((static_cast<void**>(ptr))[-1]); }
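The restored fallback implements aligned allocation by over-allocating and stashing the raw pointer one slot below the returned address, so gpr_free_aligned can recover it. A worked example of the arithmetic (the addresses are hypothetical):

    // gpr_malloc_aligned(size, /*alignment=*/64), with gpr_malloc returning
    // p == 0x1008:
    //   extra = 64 - 1 + sizeof(void*) = 71
    //   ret   = (0x1008 + 71) & ~63   = 0x104f & ~0x3f = 0x1040   (64-byte aligned)
    //   ret[-1] (stored at 0x1038)    = 0x1008            (the raw pointer)
    // gpr_free_aligned(0x1040) then reads ret[-1] and frees 0x1008.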

--- a/src/core/lib/gpr/alloc.h
+++ b/src/core/lib/gpr/alloc.h
@@ -22,13 +22,7 @@
 #include <grpc/support/port_platform.h>

 /// Given a size, round up to the next multiple of sizeof(void*).
-#define GPR_ROUND_UP_TO_ALIGNMENT_SIZE(x, align) \
-  (((x) + (align)-1u) & ~((align)-1u))
-
-#define GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(x) \
-  GPR_ROUND_UP_TO_ALIGNMENT_SIZE((x), GPR_MAX_ALIGNMENT)
-
-#define GPR_ROUND_UP_TO_CACHELINE_SIZE(x) \
-  GPR_ROUND_UP_TO_ALIGNMENT_SIZE((x), GPR_CACHELINE_SIZE)
+#define GPR_ROUND_UP_TO_ALIGNMENT_SIZE(x) \
+  (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))

 #endif /* GRPC_CORE_LIB_GPR_ALLOC_H */
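Both variants use the same power-of-two rounding trick: add align - 1, then mask off the low bits. Worked values, assuming GPR_MAX_ALIGNMENT is 16:

    // GPR_ROUND_UP_TO_ALIGNMENT_SIZE(40) == (40 + 15) & ~15 == 55 & ~15 == 48
    // GPR_ROUND_UP_TO_ALIGNMENT_SIZE(48) == (48 + 15) & ~15 == 63 & ~15 == 48
    // Exact multiples pass through unchanged; everything else rounds up to the
    // next multiple. The mask only works when the alignment is a power of two.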

--- a/src/core/lib/gprpp/arena.cc
+++ b/src/core/lib/gprpp/arena.cc
@@ -35,8 +35,8 @@ namespace {
 void* ArenaStorage(size_t initial_size) {
   static constexpr size_t base_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_core::Arena));
-  initial_size = GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(initial_size);
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_core::Arena));
+  initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
   size_t alloc_size = base_size + initial_size;
   static constexpr size_t alignment =
       (GPR_CACHELINE_SIZE > GPR_MAX_ALIGNMENT &&
@@ -67,7 +67,7 @@ Arena* Arena::Create(size_t initial_size) {
 Pair<Arena*, void*> Arena::CreateWithAlloc(size_t initial_size,
                                            size_t alloc_size) {
   static constexpr size_t base_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Arena));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
   auto* new_arena =
       new (ArenaStorage(initial_size)) Arena(initial_size, alloc_size);
   void* first_alloc = reinterpret_cast<char*>(new_arena) + base_size;
@@ -88,7 +88,7 @@ void* Arena::AllocZone(size_t size) {
   // sizing hysteresis (that is, most calls should have a large enough initial
   // zone and will not need to grow the arena).
   static constexpr size_t zone_base_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Zone));
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Zone));
   size_t alloc_size = zone_base_size + size;
   Zone* z = new (gpr_malloc_aligned(alloc_size, GPR_MAX_ALIGNMENT)) Zone();
   {

--- a/src/core/lib/gprpp/arena.h
+++ b/src/core/lib/gprpp/arena.h
@@ -58,10 +58,10 @@ class Arena {
   // Allocate \a size bytes from the arena.
   void* Alloc(size_t size) {
     static constexpr size_t base_size =
-        GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(Arena));
-    size = GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(size);
+        GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
+    size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
     size_t begin = total_used_.FetchAdd(size, MemoryOrder::RELAXED);
-    if (GPR_LIKELY(begin + size <= initial_zone_size_)) {
+    if (begin + size <= initial_zone_size_) {
       return reinterpret_cast<char*>(this) + base_size + begin;
     } else {
       return AllocZone(size);
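Arena::Alloc is a lock-free bump allocator: an atomic fetch-add claims a byte range in the zone that sits directly after the Arena header, spilling to AllocZone once the initial zone is exhausted. A minimal standalone sketch of the pattern (simplified types, not the gRPC implementation):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Simplified bump allocation over a fixed-capacity zone.
    class BumpZone {
     public:
      explicit BumpZone(size_t capacity) : capacity_(capacity) {}

      // Claims `size` bytes; returns the starting offset, or SIZE_MAX if the
      // zone is exhausted and the caller must fall back to a new zone.
      size_t Alloc(size_t size) {
        size_t begin = used_.fetch_add(size, std::memory_order_relaxed);
        return (begin + size <= capacity_) ? begin : SIZE_MAX;
      }

     private:
      std::atomic<size_t> used_{0};
      const size_t capacity_;
    };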

--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -260,10 +260,10 @@ grpc_core::TraceFlag grpc_compression_trace(false, "compression");
 #define CALL_STACK_FROM_CALL(call)   \
-  (grpc_call_stack*)((char*)(call) + \
-                     GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)))
+  (grpc_call_stack*)((char*)(call) + \
+                     GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)))
 #define CALL_FROM_CALL_STACK(call_stack) \
-  (grpc_call*)(((char*)(call_stack)) -   \
-               GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)))
+  (grpc_call*)(((char*)(call_stack)) -   \
+               GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)))

 #define CALL_ELEM_FROM_CALL(call, idx) \
   grpc_call_stack_element(CALL_STACK_FROM_CALL(call), idx)
@@ -329,7 +329,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
   size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
   GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
   size_t call_and_stack_size =
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(grpc_call)) +
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
       channel_stack->call_stack_size;
   size_t call_alloc_size =
       call_and_stack_size + (args->parent ? sizeof(child_call) : 0);

--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -115,7 +115,7 @@ void grpc_transport_move_stats(grpc_transport_stream_stats* from,
 }

 size_t grpc_transport_stream_size(grpc_transport* transport) {
-  return GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
+  return GPR_ROUND_UP_TO_ALIGNMENT_SIZE(transport->vtable->sizeof_stream);
 }

 void grpc_transport_destroy(grpc_transport* transport) {

--- a/test/core/gpr/alloc_test.cc
+++ b/test/core/gpr/alloc_test.cc
@@ -31,21 +31,12 @@ static void fake_free(void* addr) {
   *(static_cast<intptr_t*>(addr)) = static_cast<intptr_t>(0xdeadd00d);
 }

-static void* fake_aligned_malloc(size_t size, size_t alignment) {
-  return (void*)(size + alignment);
-}
-
-static void fake_aligned_free(void* addr) {
-  *(static_cast<intptr_t*>(addr)) = static_cast<intptr_t>(0xcafef00d);
-}
-
 static void test_custom_allocs() {
   const gpr_allocation_functions default_fns = gpr_get_allocation_functions();
   intptr_t addr_to_free = 0;
   char* i;
-  gpr_allocation_functions fns = {fake_malloc, nullptr,
-                                  fake_realloc, fake_free,
-                                  fake_aligned_malloc, fake_aligned_free};
+  gpr_allocation_functions fns = {fake_malloc, nullptr, fake_realloc,
+                                  fake_free};

   gpr_set_allocation_functions(fns);
   GPR_ASSERT((void*)(size_t)0xdeadbeef == gpr_malloc(0xdeadbeef));
@@ -54,11 +45,6 @@ static void test_custom_allocs() {
   gpr_free(&addr_to_free);
   GPR_ASSERT(addr_to_free == (intptr_t)0xdeadd00d);

-  GPR_ASSERT((void*)(size_t)(0xdeadbeef + 64) ==
-             gpr_malloc_aligned(0xdeadbeef, 64));
-  gpr_free_aligned(&addr_to_free);
-  GPR_ASSERT(addr_to_free == (intptr_t)0xcafef00d);
-
   /* Restore and check we don't get funky values and that we don't leak */
   gpr_set_allocation_functions(default_fns);
   GPR_ASSERT((void*)sizeof(*i) !=

--- a/test/core/util/memory_counters.cc
+++ b/test/core/util/memory_counters.cc
@@ -54,10 +54,9 @@ static void* guard_malloc(size_t size) {
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, (gpr_atm)1);
   void* ptr = g_old_allocs.malloc_fn(
-      GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size)) + size);
+      GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
   *static_cast<size_t*>(ptr) = size;
-  return static_cast<char*>(ptr) +
-         GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
+  return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
 }

 static void* guard_realloc(void* vptr, size_t size) {
@@ -68,36 +67,31 @@ static void* guard_realloc(void* vptr, size_t size) {
     guard_free(vptr);
     return nullptr;
   }
-  void* ptr = static_cast<char*>(vptr) -
-              GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
+  void* ptr =
+      static_cast<char*>(vptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_absolute, (gpr_atm)size);
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
                        -*static_cast<gpr_atm*>(ptr));
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative, (gpr_atm)size);
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_absolute, (gpr_atm)1);
   ptr = g_old_allocs.realloc_fn(
-      ptr, GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size)) + size);
+      ptr, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size)) + size);
   *static_cast<size_t*>(ptr) = size;
-  return static_cast<char*>(ptr) +
-         GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size));
+  return static_cast<char*>(ptr) + GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size));
 }

 static void guard_free(void* vptr) {
   if (vptr == nullptr) return;
-  void* ptr = static_cast<char*>(vptr) -
-              GPR_ROUND_UP_TO_MAX_ALIGNMENT_SIZE(sizeof(size_t));
+  void* ptr =
+      static_cast<char*>(vptr) - GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(size_t));
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_size_relative,
                        -*static_cast<gpr_atm*>(ptr));
   NO_BARRIER_FETCH_ADD(&g_memory_counters.total_allocs_relative, -(gpr_atm)1);
   g_old_allocs.free_fn(ptr);
 }

-// NB: We do not specify guard_malloc_aligned/guard_free_aligned methods. Since
-// they are null, calls to gpr_malloc_aligned/gpr_free_aligned are executed as a
-// wrapper over gpr_malloc/gpr_free, which do use guard_malloc/guard_free, and
-// thus their allocations are tracked as well.
-struct gpr_allocation_functions g_guard_allocs = {
-    guard_malloc, nullptr, guard_realloc, guard_free, nullptr, nullptr};
+struct gpr_allocation_functions g_guard_allocs = {guard_malloc, nullptr,
+                                                  guard_realloc, guard_free};

 void grpc_memory_counters_init() {
   memset(&g_memory_counters, 0, sizeof(g_memory_counters));
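guard_malloc implements allocation tracking with a size prefix: each block is over-allocated by a rounded-up header that stores the requested size, and the caller receives a pointer just past that header. A minimal standalone sketch of the technique (RoundUp16 stands in for GPR_ROUND_UP_TO_ALIGNMENT_SIZE; counter updates are elided):

    #include <cstdlib>
    #include <cstring>

    // Round up to a 16-byte boundary (stand-in for the gRPC macro).
    static size_t RoundUp16(size_t x) { return (x + 15u) & ~size_t{15u}; }

    // Allocate `size` bytes, storing the size in a hidden header.
    void* tracked_malloc(size_t size) {
      char* raw = static_cast<char*>(malloc(RoundUp16(sizeof(size_t)) + size));
      memcpy(raw, &size, sizeof(size));        // hidden size header
      return raw + RoundUp16(sizeof(size_t));  // caller sees only the payload
    }

    // Recover the header to learn the size, then free the original pointer.
    void tracked_free(void* ptr) {
      char* raw = static_cast<char*>(ptr) - RoundUp16(sizeof(size_t));
      size_t size;
      memcpy(&size, raw, sizeof(size));  // e.g. decrement counters by size here
      (void)size;
      free(raw);
    }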
