Shrink arena size by 40 bytes and add additional alignment options (for cache-alignment).

We shrink by:
1) Removing an unnecessary zone pointer.
2) Replacing gpr_mu (40 bytes when using pthread_mutex_t) with
std::atomic_flag.

We also header-inline the fast-path alloc (i.e. when not doing a zone
alloc) and move the malloc() for a zone alloc outside of the mutex
critical section, which allows us to replace the mutex with a spinlock.

We also cache-align created arenas.
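
For illustration, here is a minimal usage sketch of the resulting grpc_core::Arena
API (Create / Alloc / New / Destroy, as added in src/core/lib/gprpp/arena.h below).
The caller-side type, sizes, and function name in the sketch are hypothetical and
not part of this change:

#include "src/core/lib/gprpp/arena.h"

// Hypothetical caller-side type, used only to illustrate Arena::New<T>().
struct CallScratch {
  explicit CallScratch(int id) : id(id) {}
  int id;
};

void ExampleUsage() {
  // Previously: gpr_arena_create() / gpr_arena_alloc() / gpr_arena_destroy().
  grpc_core::Arena* arena = grpc_core::Arena::Create(1024);
  // Fast path: header-inlined bump allocation out of the initial zone.
  void* buf = arena->Alloc(64);
  // Placement-construct an object directly in arena-owned storage.
  CallScratch* scratch = arena->New<CallScratch>(42);
  (void)buf;
  (void)scratch;
  // Destroy() returns the total bytes allocated, which feeds call sizing
  // hysteresis via grpc_channel_update_call_size_estimate().
  size_t total_bytes = arena->Destroy();
  (void)total_bytes;
}
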
pull/18786/head
Arjun Roy
parent 2a482ca4b9
commit 8ce42f67b2
Changed files:
  1. BUILD (4)
  2. BUILD.gn (4)
  3. CMakeLists.txt (2)
  4. Makefile (2)
  5. build.yaml (3)
  6. config.m4 (2)
  7. config.w32 (2)
  8. gRPC-C++.podspec (2)
  9. gRPC-Core.podspec (4)
  10. grpc.gemspec (3)
  11. grpc.gyp (2)
  12. package.xml (3)
  13. src/core/ext/filters/client_channel/client_channel.cc (32)
  14. src/core/ext/filters/client_channel/health/health_check_client.cc (6)
  15. src/core/ext/filters/client_channel/health/health_check_client.h (4)
  16. src/core/ext/filters/client_channel/subchannel.cc (2)
  17. src/core/ext/filters/client_channel/subchannel.h (4)
  18. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (7)
  19. src/core/ext/transport/chttp2/transport/incoming_metadata.cc (2)
  20. src/core/ext/transport/chttp2/transport/incoming_metadata.h (5)
  21. src/core/ext/transport/chttp2/transport/internal.h (2)
  22. src/core/ext/transport/cronet/transport/cronet_transport.cc (13)
  23. src/core/ext/transport/inproc/inproc_transport.cc (18)
  24. src/core/lib/channel/channel_stack.h (4)
  25. src/core/lib/gpr/arena.cc (152)
  26. src/core/lib/gpr/arena.h (20)
  27. src/core/lib/gprpp/arena.cc (90)
  28. src/core/lib/gprpp/arena.h (99)
  29. src/core/lib/security/context/security_context.cc (13)
  30. src/core/lib/security/context/security_context.h (7)
  31. src/core/lib/surface/call.cc (30)
  32. src/core/lib/surface/call.h (3)
  33. src/core/lib/transport/transport.cc (3)
  34. src/core/lib/transport/transport.h (5)
  35. src/core/lib/transport/transport_impl.h (2)
  36. src/python/grpcio/grpc_core_dependencies.py (2)
  37. test/core/gpr/arena_test.cc (20)
  38. test/cpp/microbenchmarks/bm_arena.cc (22)
  39. test/cpp/microbenchmarks/bm_call_create.cc (10)
  40. test/cpp/microbenchmarks/bm_chttp2_hpack.cc (11)
  41. test/cpp/microbenchmarks/bm_chttp2_transport.cc (10)
  42. tools/doxygen/Doxyfile.c++.internal (1)
  43. tools/doxygen/Doxyfile.core.internal (3)
  44. tools/run_tests/generated/sources_and_headers.json (4)

@ -541,7 +541,6 @@ grpc_cc_library(
name = "gpr_base",
srcs = [
"src/core/lib/gpr/alloc.cc",
"src/core/lib/gpr/arena.cc",
"src/core/lib/gpr/atm.cc",
"src/core/lib/gpr/cpu_iphone.cc",
"src/core/lib/gpr/cpu_linux.cc",
@ -574,6 +573,7 @@ grpc_cc_library(
"src/core/lib/gpr/tmpfile_posix.cc",
"src/core/lib/gpr/tmpfile_windows.cc",
"src/core/lib/gpr/wrap_memcpy.cc",
"src/core/lib/gprpp/arena.cc",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc",
@ -598,6 +598,8 @@ grpc_cc_library(
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.h",
"src/core/lib/gprpp/manual_constructor.h",
"src/core/lib/gprpp/map.h",

@ -131,7 +131,6 @@ config("grpc_config") {
"include/grpc/support/time.h",
"src/core/lib/gpr/alloc.cc",
"src/core/lib/gpr/alloc.h",
"src/core/lib/gpr/arena.cc",
"src/core/lib/gpr/arena.h",
"src/core/lib/gpr/atm.cc",
"src/core/lib/gpr/cpu_iphone.cc",
@ -180,6 +179,8 @@ config("grpc_config") {
"src/core/lib/gpr/useful.h",
"src/core/lib/gpr/wrap_memcpy.cc",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.cc",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/fork.h",
@ -1157,6 +1158,7 @@ config("grpc_config") {
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/debug_location.h",
"src/core/lib/gprpp/fork.h",

@ -839,7 +839,6 @@ endif (gRPC_BUILD_TESTS)
add_library(gpr
src/core/lib/gpr/alloc.cc
src/core/lib/gpr/arena.cc
src/core/lib/gpr/atm.cc
src/core/lib/gpr/cpu_iphone.cc
src/core/lib/gpr/cpu_linux.cc
@ -872,6 +871,7 @@ add_library(gpr
src/core/lib/gpr/tmpfile_posix.cc
src/core/lib/gpr/tmpfile_windows.cc
src/core/lib/gpr/wrap_memcpy.cc
src/core/lib/gprpp/arena.cc
src/core/lib/gprpp/fork.cc
src/core/lib/gprpp/thd_posix.cc
src/core/lib/gprpp/thd_windows.cc

@ -3320,7 +3320,6 @@ endif
LIBGPR_SRC = \
src/core/lib/gpr/alloc.cc \
src/core/lib/gpr/arena.cc \
src/core/lib/gpr/atm.cc \
src/core/lib/gpr/cpu_iphone.cc \
src/core/lib/gpr/cpu_linux.cc \
@ -3353,6 +3352,7 @@ LIBGPR_SRC = \
src/core/lib/gpr/tmpfile_posix.cc \
src/core/lib/gpr/tmpfile_windows.cc \
src/core/lib/gpr/wrap_memcpy.cc \
src/core/lib/gprpp/arena.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/thd_posix.cc \
src/core/lib/gprpp/thd_windows.cc \

@ -114,7 +114,6 @@ filegroups:
- name: gpr_base
src:
- src/core/lib/gpr/alloc.cc
- src/core/lib/gpr/arena.cc
- src/core/lib/gpr/atm.cc
- src/core/lib/gpr/cpu_iphone.cc
- src/core/lib/gpr/cpu_linux.cc
@ -147,6 +146,7 @@ filegroups:
- src/core/lib/gpr/tmpfile_posix.cc
- src/core/lib/gpr/tmpfile_windows.cc
- src/core/lib/gpr/wrap_memcpy.cc
- src/core/lib/gprpp/arena.cc
- src/core/lib/gprpp/fork.cc
- src/core/lib/gprpp/thd_posix.cc
- src/core/lib/gprpp/thd_windows.cc
@ -191,6 +191,7 @@ filegroups:
- src/core/lib/gpr/tmpfile.h
- src/core/lib/gpr/useful.h
- src/core/lib/gprpp/abstract.h
- src/core/lib/gprpp/arena.h
- src/core/lib/gprpp/atomic.h
- src/core/lib/gprpp/fork.h
- src/core/lib/gprpp/manual_constructor.h

@ -45,7 +45,6 @@ if test "$PHP_GRPC" != "no"; then
third_party/address_sorting/address_sorting_posix.c \
third_party/address_sorting/address_sorting_windows.c \
src/core/lib/gpr/alloc.cc \
src/core/lib/gpr/arena.cc \
src/core/lib/gpr/atm.cc \
src/core/lib/gpr/cpu_iphone.cc \
src/core/lib/gpr/cpu_linux.cc \
@ -78,6 +77,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/gpr/tmpfile_posix.cc \
src/core/lib/gpr/tmpfile_windows.cc \
src/core/lib/gpr/wrap_memcpy.cc \
src/core/lib/gprpp/arena.cc \
src/core/lib/gprpp/fork.cc \
src/core/lib/gprpp/thd_posix.cc \
src/core/lib/gprpp/thd_windows.cc \

@ -20,7 +20,6 @@ if (PHP_GRPC != "no") {
"third_party\\address_sorting\\address_sorting_posix.c " +
"third_party\\address_sorting\\address_sorting_windows.c " +
"src\\core\\lib\\gpr\\alloc.cc " +
"src\\core\\lib\\gpr\\arena.cc " +
"src\\core\\lib\\gpr\\atm.cc " +
"src\\core\\lib\\gpr\\cpu_iphone.cc " +
"src\\core\\lib\\gpr\\cpu_linux.cc " +
@ -53,6 +52,7 @@ if (PHP_GRPC != "no") {
"src\\core\\lib\\gpr\\tmpfile_posix.cc " +
"src\\core\\lib\\gpr\\tmpfile_windows.cc " +
"src\\core\\lib\\gpr\\wrap_memcpy.cc " +
"src\\core\\lib\\gprpp\\arena.cc " +
"src\\core\\lib\\gprpp\\fork.cc " +
"src\\core\\lib\\gprpp\\thd_posix.cc " +
"src\\core\\lib\\gprpp\\thd_windows.cc " +

@ -264,6 +264,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -582,6 +583,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',

@ -205,6 +205,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',
@ -215,7 +216,6 @@ Pod::Spec.new do |s|
'src/core/lib/gprpp/thd.h',
'src/core/lib/profiling/timers.h',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/arena.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
@ -248,6 +248,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
@ -886,6 +887,7 @@ Pod::Spec.new do |s|
'src/core/lib/gpr/tmpfile.h',
'src/core/lib/gpr/useful.h',
'src/core/lib/gprpp/abstract.h',
'src/core/lib/gprpp/arena.h',
'src/core/lib/gprpp/atomic.h',
'src/core/lib/gprpp/fork.h',
'src/core/lib/gprpp/manual_constructor.h',

@ -99,6 +99,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gpr/tmpfile.h )
s.files += %w( src/core/lib/gpr/useful.h )
s.files += %w( src/core/lib/gprpp/abstract.h )
s.files += %w( src/core/lib/gprpp/arena.h )
s.files += %w( src/core/lib/gprpp/atomic.h )
s.files += %w( src/core/lib/gprpp/fork.h )
s.files += %w( src/core/lib/gprpp/manual_constructor.h )
@ -109,7 +110,6 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gprpp/thd.h )
s.files += %w( src/core/lib/profiling/timers.h )
s.files += %w( src/core/lib/gpr/alloc.cc )
s.files += %w( src/core/lib/gpr/arena.cc )
s.files += %w( src/core/lib/gpr/atm.cc )
s.files += %w( src/core/lib/gpr/cpu_iphone.cc )
s.files += %w( src/core/lib/gpr/cpu_linux.cc )
@ -142,6 +142,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/gpr/tmpfile_posix.cc )
s.files += %w( src/core/lib/gpr/tmpfile_windows.cc )
s.files += %w( src/core/lib/gpr/wrap_memcpy.cc )
s.files += %w( src/core/lib/gprpp/arena.cc )
s.files += %w( src/core/lib/gprpp/fork.cc )
s.files += %w( src/core/lib/gprpp/thd_posix.cc )
s.files += %w( src/core/lib/gprpp/thd_windows.cc )

@ -218,7 +218,6 @@
],
'sources': [
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/arena.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
@ -251,6 +250,7 @@
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',

@ -104,6 +104,7 @@
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/useful.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/abstract.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/arena.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/atomic.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/manual_constructor.h" role="src" />
@ -114,7 +115,6 @@
<file baseinstalldir="/" name="src/core/lib/gprpp/thd.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/profiling/timers.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/alloc.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/atm.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/cpu_iphone.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/cpu_linux.cc" role="src" />
@ -147,6 +147,7 @@
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/tmpfile_windows.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gpr/wrap_memcpy.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/arena.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/fork.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd_posix.cc" role="src" />
<file baseinstalldir="/" name="src/core/lib/gprpp/thd_windows.cc" role="src" />

@ -615,7 +615,7 @@ class CallData {
grpc_slice path_; // Request path.
gpr_timespec call_start_time_;
grpc_millis deadline_;
gpr_arena* arena_;
Arena* arena_;
grpc_call_stack* owning_call_;
CallCombiner* call_combiner_;
grpc_call_context_element* call_context_;
@ -1483,8 +1483,8 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_initial_metadata_storage_ == nullptr);
grpc_metadata_batch* send_initial_metadata =
batch->payload->send_initial_metadata.send_initial_metadata;
send_initial_metadata_storage_ = (grpc_linked_mdelem*)gpr_arena_alloc(
arena_, sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
send_initial_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_initial_metadata->list.count);
grpc_metadata_batch_copy(send_initial_metadata, &send_initial_metadata_,
send_initial_metadata_storage_);
send_initial_metadata_flags_ =
@ -1493,10 +1493,8 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
}
// Set up cache for send_message ops.
if (batch->send_message) {
ByteStreamCache* cache = static_cast<ByteStreamCache*>(
gpr_arena_alloc(arena_, sizeof(ByteStreamCache)));
new (cache)
ByteStreamCache(std::move(batch->payload->send_message.send_message));
ByteStreamCache* cache = arena_->New<ByteStreamCache>(
std::move(batch->payload->send_message.send_message));
send_messages_.push_back(cache);
}
// Save metadata batch for send_trailing_metadata ops.
@ -1505,8 +1503,7 @@ void CallData::MaybeCacheSendOpsForBatch(PendingBatch* pending) {
GPR_ASSERT(send_trailing_metadata_storage_ == nullptr);
grpc_metadata_batch* send_trailing_metadata =
batch->payload->send_trailing_metadata.send_trailing_metadata;
send_trailing_metadata_storage_ = (grpc_linked_mdelem*)gpr_arena_alloc(
arena_,
send_trailing_metadata_storage_ = (grpc_linked_mdelem*)arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata->list.count);
grpc_metadata_batch_copy(send_trailing_metadata, &send_trailing_metadata_,
send_trailing_metadata_storage_);
@ -1994,10 +1991,8 @@ bool CallData::MaybeRetry(grpc_call_element* elem,
CallData::SubchannelCallBatchData* CallData::SubchannelCallBatchData::Create(
grpc_call_element* elem, int refcount, bool set_on_complete) {
CallData* calld = static_cast<CallData*>(elem->call_data);
SubchannelCallBatchData* batch_data =
new (gpr_arena_alloc(calld->arena_, sizeof(*batch_data)))
SubchannelCallBatchData(elem, calld, refcount, set_on_complete);
return batch_data;
return calld->arena_->New<SubchannelCallBatchData>(elem, calld, refcount,
set_on_complete);
}
CallData::SubchannelCallBatchData::SubchannelCallBatchData(
@ -2589,10 +2584,10 @@ void CallData::AddRetriableSendInitialMetadataOp(
//
// If we've already completed one or more attempts, add the
// grpc-retry-attempts header.
retry_state->send_initial_metadata_storage = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(arena_, sizeof(grpc_linked_mdelem) *
(send_initial_metadata_.list.count +
(num_attempts_completed_ > 0))));
retry_state->send_initial_metadata_storage =
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) *
(send_initial_metadata_.list.count + (num_attempts_completed_ > 0))));
grpc_metadata_batch_copy(&send_initial_metadata_,
&retry_state->send_initial_metadata,
retry_state->send_initial_metadata_storage);
@ -2651,8 +2646,7 @@ void CallData::AddRetriableSendTrailingMetadataOp(
// the filters in the subchannel stack may modify this batch, and we don't
// want those modifications to be passed forward to subsequent attempts.
retry_state->send_trailing_metadata_storage =
static_cast<grpc_linked_mdelem*>(gpr_arena_alloc(
arena_,
static_cast<grpc_linked_mdelem*>(arena_->Alloc(
sizeof(grpc_linked_mdelem) * send_trailing_metadata_.list.count));
grpc_metadata_batch_copy(&send_trailing_metadata_,
&retry_state->send_trailing_metadata,

@ -280,8 +280,8 @@ HealthCheckClient::CallState::CallState(
: InternallyRefCounted<CallState>(&grpc_health_check_client_trace),
health_check_client_(std::move(health_check_client)),
pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)),
arena_(gpr_arena_create(health_check_client_->connected_subchannel_
->GetInitialCallSizeEstimate(0))),
arena_(Arena::Create(health_check_client_->connected_subchannel_
->GetInitialCallSizeEstimate(0))),
payload_(context_) {}
HealthCheckClient::CallState::~CallState() {
@ -302,7 +302,7 @@ HealthCheckClient::CallState::~CallState() {
// need to take a ref of the call stack to guarantee closure liveness.
call_combiner_.SetNotifyOnCancel(nullptr);
ExecCtx::Get()->Flush();
gpr_arena_destroy(arena_);
arena_->Destroy();
}
void HealthCheckClient::CallState::Orphan() {

@ -27,7 +27,7 @@
#include "src/core/ext/filters/client_channel/client_channel_channelz.h"
#include "src/core/ext/filters/client_channel/subchannel.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/orphanable.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
@ -97,7 +97,7 @@ class HealthCheckClient : public InternallyRefCounted<HealthCheckClient> {
RefCountedPtr<HealthCheckClient> health_check_client_;
grpc_polling_entity pollent_;
gpr_arena* arena_;
Arena* arena_;
grpc_core::CallCombiner call_combiner_;
grpc_call_context_element context_[GRPC_CONTEXT_COUNT] = {};

@ -121,7 +121,7 @@ RefCountedPtr<SubchannelCall> ConnectedSubchannel::CreateCall(
const size_t allocation_size =
GetInitialCallSizeEstimate(args.parent_data_size);
RefCountedPtr<SubchannelCall> call(
new (gpr_arena_alloc(args.arena, allocation_size))
new (args.arena->Alloc(allocation_size))
SubchannelCall(Ref(DEBUG_LOCATION, "subchannel_call"), args));
grpc_call_stack* callstk = SUBCHANNEL_CALL_TO_CALL_STACK(call.get());
const grpc_call_element_args call_args = {

@ -26,7 +26,7 @@
#include "src/core/ext/filters/client_channel/subchannel_pool_interface.h"
#include "src/core/lib/backoff/backoff.h"
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/gprpp/sync.h"
@ -75,7 +75,7 @@ class ConnectedSubchannel : public RefCounted<ConnectedSubchannel> {
grpc_slice path;
gpr_timespec start_time;
grpc_millis deadline;
gpr_arena* arena;
Arena* arena;
grpc_call_context_element* context;
grpc_core::CallCombiner* call_combiner;
size_t parent_data_size;

@ -655,11 +655,12 @@ grpc_chttp2_stream::Reffer::Reffer(grpc_chttp2_stream* s) {
grpc_chttp2_stream::grpc_chttp2_stream(grpc_chttp2_transport* t,
grpc_stream_refcount* refcount,
const void* server_data,
gpr_arena* arena)
grpc_core::Arena* arena)
: t(t),
refcount(refcount),
reffer(this),
metadata_buffer{{arena}, {arena}} {
metadata_buffer{grpc_chttp2_incoming_metadata_buffer(arena),
grpc_chttp2_incoming_metadata_buffer(arena)} {
if (server_data) {
id = static_cast<uint32_t>((uintptr_t)server_data);
*t->accepting_stream = this;
@ -740,7 +741,7 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
GPR_TIMER_SCOPE("init_stream", 0);
grpc_chttp2_transport* t = reinterpret_cast<grpc_chttp2_transport*>(gt);
new (gs) grpc_chttp2_stream(t, refcount, server_data, arena);

@ -36,7 +36,7 @@ grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
buffer->count++;
} else {
storage = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(buffer->arena, sizeof(grpc_linked_mdelem)));
buffer->arena->Alloc(sizeof(grpc_linked_mdelem)));
}
return grpc_metadata_batch_add_tail(&buffer->batch, storage, elem);
}

@ -24,7 +24,8 @@
#include "src/core/lib/transport/transport.h"
struct grpc_chttp2_incoming_metadata_buffer {
grpc_chttp2_incoming_metadata_buffer(gpr_arena* arena) : arena(arena) {
explicit grpc_chttp2_incoming_metadata_buffer(grpc_core::Arena* arena)
: arena(arena) {
grpc_metadata_batch_init(&batch);
batch.deadline = GRPC_MILLIS_INF_FUTURE;
}
@ -34,7 +35,7 @@ struct grpc_chttp2_incoming_metadata_buffer {
static constexpr size_t kPreallocatedMDElem = 10;
gpr_arena* arena;
grpc_core::Arena* arena;
size_t size = 0; // total size of metadata.
size_t count = 0; // minimum of count of metadata and kPreallocatedMDElem.
// These preallocated mdelems are used while count < kPreallocatedMDElem.

@ -504,7 +504,7 @@ typedef enum {
struct grpc_chttp2_stream {
grpc_chttp2_stream(grpc_chttp2_transport* t, grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena);
const void* server_data, grpc_core::Arena* arena);
~grpc_chttp2_stream();
void* context;

@ -110,7 +110,7 @@ typedef struct grpc_cronet_transport grpc_cronet_transport;
/* TODO (makdharma): reorder structure for memory efficiency per
http://www.catb.org/esr/structure-packing/#_structure_reordering: */
struct read_state {
read_state(gpr_arena* arena)
read_state(grpc_core::Arena* arena)
: trailing_metadata(arena), initial_metadata(arena) {
grpc_slice_buffer_init(&read_slice_buffer);
}
@ -144,7 +144,7 @@ struct write_state {
/* track state of one stream op */
struct op_state {
op_state(gpr_arena* arena) : rs(arena) {}
op_state(grpc_core::Arena* arena) : rs(arena) {}
bool state_op_done[OP_NUM_OPS] = {};
bool state_callback_received[OP_NUM_OPS] = {};
@ -186,10 +186,10 @@ struct op_storage {
struct stream_obj {
stream_obj(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, gpr_arena* arena);
grpc_stream_refcount* refcount, grpc_core::Arena* arena);
~stream_obj();
gpr_arena* arena;
grpc_core::Arena* arena;
struct op_and_state* oas = nullptr;
grpc_transport_stream_op_batch* curr_op = nullptr;
grpc_cronet_transport* curr_ct;
@ -1368,7 +1368,8 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
*/
inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, gpr_arena* arena)
grpc_stream_refcount* refcount,
grpc_core::Arena* arena)
: arena(arena),
curr_ct(reinterpret_cast<grpc_cronet_transport*>(gt)),
curr_gs(gs),
@ -1387,7 +1388,7 @@ inline stream_obj::~stream_obj() {
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
new (gs) stream_obj(gt, gs, refcount, arena);
return 0;
}

@ -120,7 +120,7 @@ struct inproc_transport {
struct inproc_stream {
inproc_stream(inproc_transport* t, grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena)
const void* server_data, grpc_core::Arena* arena)
: t(t), refs(refcount), arena(arena) {
// Ref this stream right now for ctor and list.
ref("inproc_init_stream:init");
@ -250,7 +250,7 @@ struct inproc_stream {
grpc_stream_refcount* refs;
grpc_closure* closure_at_destroy = nullptr;
gpr_arena* arena;
grpc_core::Arena* arena;
grpc_transport_stream_op_batch* send_message_op = nullptr;
grpc_transport_stream_op_batch* send_trailing_md_op = nullptr;
@ -309,8 +309,8 @@ grpc_error* fill_in_metadata(inproc_stream* s,
grpc_error* error = GRPC_ERROR_NONE;
for (grpc_linked_mdelem* elem = metadata->list.head;
(elem != nullptr) && (error == GRPC_ERROR_NONE); elem = elem->next) {
grpc_linked_mdelem* nelem = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*nelem)));
grpc_linked_mdelem* nelem =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*nelem)));
nelem->md =
grpc_mdelem_from_slices(grpc_slice_intern(GRPC_MDKEY(elem->md)),
grpc_slice_intern(GRPC_MDVALUE(elem->md)));
@ -322,7 +322,7 @@ grpc_error* fill_in_metadata(inproc_stream* s,
int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
new (gs) inproc_stream(t, refcount, server_data, arena);
@ -436,13 +436,13 @@ void fail_helper_locked(inproc_stream* s, grpc_error* error) {
// since it expects that as well as no error yet
grpc_metadata_batch fake_md;
grpc_metadata_batch_init(&fake_md);
grpc_linked_mdelem* path_md = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*path_md)));
grpc_linked_mdelem* path_md =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*path_md)));
path_md->md = grpc_mdelem_from_slices(g_fake_path_key, g_fake_path_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, path_md) ==
GRPC_ERROR_NONE);
grpc_linked_mdelem* auth_md = static_cast<grpc_linked_mdelem*>(
gpr_arena_alloc(s->arena, sizeof(*auth_md)));
grpc_linked_mdelem* auth_md =
static_cast<grpc_linked_mdelem*>(s->arena->Alloc(sizeof(*auth_md)));
auth_md->md = grpc_mdelem_from_slices(g_fake_auth_key, g_fake_auth_value);
GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, auth_md) ==
GRPC_ERROR_NONE);

@ -42,7 +42,7 @@
#include <grpc/support/time.h>
#include "src/core/lib/debug/trace.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/polling_entity.h"
#include "src/core/lib/transport/transport.h"
@ -69,7 +69,7 @@ typedef struct {
const grpc_slice& path;
gpr_timespec start_time;
grpc_millis deadline;
gpr_arena* arena;
grpc_core::Arena* arena;
grpc_core::CallCombiner* call_combiner;
} grpc_call_element_args;

@ -1,152 +0,0 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/gpr/arena.h"
#include <string.h>
#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/memory.h"
static void* gpr_arena_malloc(size_t size) {
return gpr_malloc_aligned(size, GPR_MAX_ALIGNMENT);
}
// Uncomment this to use a simple arena that simply allocates the
// requested amount of memory for each call to gpr_arena_alloc(). This
// effectively eliminates the efficiency gain of using an arena, but it
// may be useful for debugging purposes.
//#define SIMPLE_ARENA_FOR_DEBUGGING
#ifdef SIMPLE_ARENA_FOR_DEBUGGING
struct gpr_arena {
gpr_arena() { gpr_mu_init(&mu); }
~gpr_arena() {
gpr_mu_destroy(&mu);
for (size_t i = 0; i < num_ptrs; ++i) {
gpr_free_aligned(ptrs[i]);
}
gpr_free(ptrs);
}
gpr_mu mu;
void** ptrs = nullptr;
size_t num_ptrs = 0;
};
gpr_arena* gpr_arena_create(size_t ignored_initial_size) {
return grpc_core::New<gpr_arena>();
}
size_t gpr_arena_destroy(gpr_arena* arena) {
grpc_core::Delete(arena);
return 1; // Value doesn't matter, since it won't be used.
}
void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
gpr_mu_lock(&arena->mu);
arena->ptrs =
(void**)gpr_realloc(arena->ptrs, sizeof(void*) * (arena->num_ptrs + 1));
void* retval = arena->ptrs[arena->num_ptrs++] = gpr_arena_malloc(size);
gpr_mu_unlock(&arena->mu);
return retval;
}
#else // SIMPLE_ARENA_FOR_DEBUGGING
// TODO(roth): We currently assume that all callers need alignment of 16
// bytes, which may be wrong in some cases. As part of converting the
// arena API to C++, we should consider replacing gpr_arena_alloc() with a
// template that takes the type of the value being allocated, which
// would allow us to use the alignment actually needed by the caller.
typedef struct zone {
zone* next = nullptr;
} zone;
struct gpr_arena {
gpr_arena(size_t initial_size)
: initial_zone_size(initial_size), last_zone(&initial_zone) {
gpr_mu_init(&arena_growth_mutex);
}
~gpr_arena() {
gpr_mu_destroy(&arena_growth_mutex);
zone* z = initial_zone.next;
while (z) {
zone* next_z = z->next;
z->~zone();
gpr_free_aligned(z);
z = next_z;
}
}
// Keep track of the total used size. We use this in our call sizing
// historesis.
gpr_atm total_used = 0;
size_t initial_zone_size;
zone initial_zone;
zone* last_zone;
gpr_mu arena_growth_mutex;
};
gpr_arena* gpr_arena_create(size_t initial_size) {
initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
return new (gpr_arena_malloc(
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + initial_size))
gpr_arena(initial_size);
}
size_t gpr_arena_destroy(gpr_arena* arena) {
const gpr_atm size = gpr_atm_no_barrier_load(&arena->total_used);
arena->~gpr_arena();
gpr_free_aligned(arena);
return static_cast<size_t>(size);
}
void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
size_t begin = gpr_atm_no_barrier_fetch_add(&arena->total_used, size);
if (begin + size <= arena->initial_zone_size) {
return reinterpret_cast<char*>(arena) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(gpr_arena)) + begin;
} else {
// If the allocation isn't able to end in the initial zone, create a new
// zone for this allocation, and any unused space in the initial zone is
// wasted. This overflowing and wasting is uncommon because of our arena
// sizing historesis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
gpr_mu_lock(&arena->arena_growth_mutex);
zone* z = new (gpr_arena_malloc(
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone)) + size)) zone();
arena->last_zone->next = z;
arena->last_zone = z;
gpr_mu_unlock(&arena->arena_growth_mutex);
return reinterpret_cast<char*>(z) +
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(zone));
}
}
#endif // SIMPLE_ARENA_FOR_DEBUGGING

@ -21,21 +21,27 @@
// the arena as a whole is freed
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory
// This transitional API is deprecated and will be removed soon in favour of
// src/core/lib/gprpp/arena.h .
#ifndef GRPC_CORE_LIB_GPR_ARENA_H
#define GRPC_CORE_LIB_GPR_ARENA_H
#include <grpc/support/port_platform.h>
#include <stddef.h>
typedef struct gpr_arena gpr_arena;
#include "src/core/lib/gprpp/arena.h"
// TODO(arjunroy) : Remove deprecated gpr_arena API once all callers are gone.
typedef class grpc_core::Arena gpr_arena;
// Create an arena, with \a initial_size bytes in the first allocated buffer
gpr_arena* gpr_arena_create(size_t initial_size);
// Allocate \a size bytes from the arena
void* gpr_arena_alloc(gpr_arena* arena, size_t size);
inline gpr_arena* gpr_arena_create(size_t initial_size) {
return grpc_core::Arena::Create(initial_size);
}
// Destroy an arena, returning the total number of bytes allocated
size_t gpr_arena_destroy(gpr_arena* arena);
inline size_t gpr_arena_destroy(gpr_arena* arena) { return arena->Destroy(); }
// Allocate \a size bytes from the arena
inline void* gpr_arena_alloc(gpr_arena* arena, size_t size) {
return arena->Alloc(size);
}
#endif /* GRPC_CORE_LIB_GPR_ARENA_H */

@ -0,0 +1,90 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/arena.h"
#include <string.h>
#include <new>
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gprpp/memory.h"
template <size_t alignment>
static void* gpr_arena_malloc(size_t size) {
return gpr_malloc_aligned(size, alignment);
}
namespace grpc_core {
Arena::~Arena() {
Zone* z = last_zone_;
while (z) {
Zone* prev_z = z->prev;
z->~Zone();
gpr_free_aligned(z);
z = prev_z;
}
}
Arena* Arena::Create(size_t initial_size) {
static constexpr size_t base_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
initial_size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(initial_size);
size_t alloc_size = base_size + initial_size;
static constexpr size_t alignment =
(GPR_CACHELINE_SIZE > GPR_MAX_ALIGNMENT &&
GPR_CACHELINE_SIZE % GPR_MAX_ALIGNMENT == 0)
? GPR_CACHELINE_SIZE
: GPR_MAX_ALIGNMENT;
return new (gpr_arena_malloc<alignment>(alloc_size)) Arena(initial_size);
}
size_t Arena::Destroy() {
size_t size = total_used_.Load(MemoryOrder::RELAXED);
this->~Arena();
gpr_free_aligned(this);
return size;
}
void* Arena::AllocZone(size_t size) {
// If the allocation isn't able to end in the initial zone, create a new
// zone for this allocation, and any unused space in the initial zone is
// wasted. This overflowing and wasting is uncommon because of our arena
// sizing hysteresis (that is, most calls should have a large enough initial
// zone and will not need to grow the arena).
static constexpr size_t zone_base_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Zone));
size_t alloc_size = zone_base_size + size;
Zone* z = new (gpr_arena_malloc<GPR_MAX_ALIGNMENT>(alloc_size)) Zone();
{
gpr_spinlock_lock(&arena_growth_spinlock_);
z->prev = last_zone_;
last_zone_ = z;
gpr_spinlock_unlock(&arena_growth_spinlock_);
}
return reinterpret_cast<char*>(z) + zone_base_size;
}
} // namespace grpc_core

@ -0,0 +1,99 @@
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// \file Arena based allocator
// Allows very fast allocation of memory, but that memory cannot be freed until
// the arena as a whole is freed
// Tracks the total memory allocated against it, so that future arenas can
// pre-allocate the right amount of memory
#ifndef GRPC_CORE_LIB_GPRPP_ARENA_H
#define GRPC_CORE_LIB_GPRPP_ARENA_H
#include <grpc/support/port_platform.h>
#include <new>
#include <utility>
#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/spinlock.h"
#include "src/core/lib/gprpp/atomic.h"
#include <stddef.h>
namespace grpc_core {
class Arena {
public:
// Create an arena, with \a initial_size bytes in the first allocated buffer.
static Arena* Create(size_t initial_size);
// Destroy an arena, returning the total number of bytes allocated.
size_t Destroy();
// Allocate \a size bytes from the arena.
void* Alloc(size_t size) {
static constexpr size_t base_size =
GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(Arena));
size = GPR_ROUND_UP_TO_ALIGNMENT_SIZE(size);
size_t begin = total_used_.FetchAdd(size, MemoryOrder::RELAXED);
if (begin + size <= initial_zone_size_) {
return reinterpret_cast<char*>(this) + base_size + begin;
} else {
return AllocZone(size);
}
}
// TODO(roth): We currently assume that all callers need alignment of 16
// bytes, which may be wrong in some cases. When we have time, we should
// change this to instead use the alignment of the type being allocated by
// this method.
template <typename T, typename... Args>
T* New(Args&&... args) {
T* t = static_cast<T*>(Alloc(sizeof(T)));
new (t) T(std::forward<Args>(args)...);
return t;
}
private:
struct Zone {
Zone* prev;
};
explicit Arena(size_t initial_size) : initial_zone_size_(initial_size) {}
~Arena();
void* AllocZone(size_t size);
// Keep track of the total used size. We use this in our call sizing
// hysteresis.
Atomic<size_t> total_used_;
size_t initial_zone_size_;
gpr_spinlock arena_growth_spinlock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
// If the initial arena allocation wasn't enough, we allocate additional zones
// in a reverse linked list. Each additional zone consists of (1) a pointer to
// the zone added before this zone (null if this is the first additional zone)
// and (2) the allocated memory. The arena itself maintains a pointer to the
// last zone; the zone list is reverse-walked during arena destruction only.
Zone* last_zone_ = nullptr;
};
} // namespace grpc_core
#endif /* GRPC_CORE_LIB_GPRPP_ARENA_H */

@ -21,8 +21,8 @@
#include <string.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/security/context/security_context.h"
@ -102,9 +102,9 @@ grpc_client_security_context::~grpc_client_security_context() {
}
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena, grpc_call_credentials* creds) {
return new (gpr_arena_alloc(arena, sizeof(grpc_client_security_context)))
grpc_client_security_context(creds != nullptr ? creds->Ref() : nullptr);
grpc_core::Arena* arena, grpc_call_credentials* creds) {
return arena->New<grpc_client_security_context>(
creds != nullptr ? creds->Ref() : nullptr);
}
void grpc_client_security_context_destroy(void* ctx) {
@ -123,9 +123,8 @@ grpc_server_security_context::~grpc_server_security_context() {
}
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena) {
return new (gpr_arena_alloc(arena, sizeof(grpc_server_security_context)))
grpc_server_security_context();
grpc_core::Arena* arena) {
return arena->New<grpc_server_security_context>();
}
void grpc_server_security_context_destroy(void* ctx) {

@ -21,6 +21,7 @@
#include <grpc/support/port_platform.h>
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/ref_counted.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/pollset.h"
@ -28,8 +29,6 @@
extern grpc_core::DebugOnlyTraceFlag grpc_trace_auth_context_refcount;
struct gpr_arena;
/* --- grpc_auth_context ---
High level authentication context object. Can optionally be chained. */
@ -121,7 +120,7 @@ struct grpc_client_security_context {
};
grpc_client_security_context* grpc_client_security_context_create(
gpr_arena* arena, grpc_call_credentials* creds);
grpc_core::Arena* arena, grpc_call_credentials* creds);
void grpc_client_security_context_destroy(void* ctx);
/* --- grpc_server_security_context ---
@ -137,7 +136,7 @@ struct grpc_server_security_context {
};
grpc_server_security_context* grpc_server_security_context_create(
gpr_arena* arena);
grpc_core::Arena* arena);
void grpc_server_security_context_destroy(void* ctx);
/* --- Channel args for auth context --- */

@ -35,9 +35,9 @@
#include "src/core/lib/compression/algorithm_metadata.h"
#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/alloc.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/profiling/timers.h"
@ -124,7 +124,7 @@ struct child_call {
#define RECV_INITIAL_METADATA_FIRST ((gpr_atm)1)
struct grpc_call {
grpc_call(gpr_arena* arena, const grpc_call_create_args& args)
grpc_call(grpc_core::Arena* arena, const grpc_call_create_args& args)
: arena(arena),
cq(args.cq),
channel(args.channel),
@ -143,7 +143,7 @@ struct grpc_call {
}
gpr_refcount ext_ref;
gpr_arena* arena;
grpc_core::Arena* arena;
grpc_core::CallCombiner call_combiner;
grpc_completion_queue* cq;
grpc_polling_entity pollent;
@ -290,13 +290,13 @@ static void add_init_error(grpc_error** composite, grpc_error* new_err) {
}
void* grpc_call_arena_alloc(grpc_call* call, size_t size) {
return gpr_arena_alloc(call->arena, size);
return call->arena->Alloc(size);
}
static parent_call* get_or_create_parent_call(grpc_call* call) {
parent_call* p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
if (p == nullptr) {
p = new (gpr_arena_alloc(call->arena, sizeof(*p))) parent_call();
p = call->arena->New<parent_call>();
if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
(gpr_atm)p)) {
p->~parent_call();
@ -327,10 +327,10 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
grpc_call* call;
size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
gpr_arena* arena = gpr_arena_create(initial_size);
call = new (gpr_arena_alloc(
arena, GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size)) grpc_call(arena, *args);
grpc_core::Arena* arena = grpc_core::Arena::Create(initial_size);
call = new (arena->Alloc(GPR_ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call)) +
channel_stack->call_stack_size))
grpc_call(arena, *args);
*out_call = call;
grpc_slice path = grpc_empty_slice();
if (call->is_client) {
@ -362,8 +362,7 @@ grpc_error* grpc_call_create(const grpc_call_create_args* args,
bool immediately_cancel = false;
if (args->parent != nullptr) {
call->child = new (gpr_arena_alloc(arena, sizeof(child_call)))
child_call(args->parent);
call->child = arena->New<child_call>(args->parent);
GRPC_CALL_INTERNAL_REF(args->parent, "child");
GPR_ASSERT(call->is_client);
@ -500,9 +499,9 @@ void grpc_call_internal_unref(grpc_call* c REF_ARG) {
static void release_call(void* call, grpc_error* error) {
grpc_call* c = static_cast<grpc_call*>(call);
grpc_channel* channel = c->channel;
gpr_arena* arena = c->arena;
grpc_core::Arena* arena = c->arena;
c->~grpc_call();
grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(arena));
grpc_channel_update_call_size_estimate(channel, arena->Destroy());
GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
}
@ -1067,7 +1066,7 @@ static void recv_trailing_filter(void* args, grpc_metadata_batch* b,
publish_app_metadata(call, b, true);
}
gpr_arena* grpc_call_get_arena(grpc_call* call) { return call->arena; }
grpc_core::Arena* grpc_call_get_arena(grpc_call* call) { return call->arena; }
grpc_call_stack* grpc_call_get_call_stack(grpc_call* call) {
return CALL_STACK_FROM_CALL(call);
@ -1128,8 +1127,7 @@ static batch_control* reuse_or_allocate_batch_control(grpc_call* call,
bctl->~batch_control();
bctl->op = {};
} else {
bctl = new (gpr_arena_alloc(call->arena, sizeof(batch_control)))
batch_control();
bctl = call->arena->New<batch_control>();
*pslot = bctl;
}
bctl->call = call;

@ -23,6 +23,7 @@
#include "src/core/lib/channel/channel_stack.h"
#include "src/core/lib/channel/context.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/surface/api_trace.h"
#include <grpc/grpc.h>
@ -72,7 +73,7 @@ void grpc_call_internal_unref(grpc_call* call);
#define GRPC_CALL_INTERNAL_UNREF(call, reason) grpc_call_internal_unref(call)
#endif
gpr_arena* grpc_call_get_arena(grpc_call* call);
grpc_core::Arena* grpc_call_get_arena(grpc_call* call);
grpc_call_stack* grpc_call_get_call_stack(grpc_call* call);

@ -124,7 +124,8 @@ void grpc_transport_destroy(grpc_transport* transport) {
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena) {
const void* server_data,
grpc_core::Arena* arena) {
return transport->vtable->init_stream(transport, stream, refcount,
server_data, arena);
}

@ -24,7 +24,7 @@
#include <stddef.h>
#include "src/core/lib/channel/context.h"
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "src/core/lib/iomgr/call_combiner.h"
#include "src/core/lib/iomgr/endpoint.h"
#include "src/core/lib/iomgr/polling_entity.h"
@ -358,7 +358,8 @@ size_t grpc_transport_stream_size(grpc_transport* transport);
supplied from the accept_stream callback function */
int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
grpc_stream_refcount* refcount,
const void* server_data, gpr_arena* arena);
const void* server_data,
grpc_core::Arena* arena);
void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
grpc_polling_entity* pollent);

@ -34,7 +34,7 @@ typedef struct grpc_transport_vtable {
/* implementation of grpc_transport_init_stream */
int (*init_stream)(grpc_transport* self, grpc_stream* stream,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena);
grpc_core::Arena* arena);
/* implementation of grpc_transport_set_pollset */
void (*set_pollset)(grpc_transport* self, grpc_stream* stream,

@ -19,7 +19,6 @@ CORE_SOURCE_FILES = [
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/arena.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
@ -52,6 +51,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/arena.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',

@ -16,7 +16,7 @@
*
*/
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include <inttypes.h>
#include <string.h>
@ -31,7 +31,9 @@
#include "src/core/lib/gprpp/thd.h"
#include "test/core/util/test_config.h"
static void test_noop(void) { gpr_arena_destroy(gpr_arena_create(1)); }
using grpc_core::Arena;
static void test_noop(void) { Arena::Create(1)->Destroy(); }
static void test(const char* name, size_t init_size, const size_t* allocs,
size_t nallocs) {
@ -50,10 +52,10 @@ static void test(const char* name, size_t init_size, const size_t* allocs,
gpr_log(GPR_INFO, "%s", s);
gpr_free(s);
gpr_arena* a = gpr_arena_create(init_size);
Arena* a = Arena::Create(init_size);
void** ps = static_cast<void**>(gpr_zalloc(sizeof(*ps) * nallocs));
for (size_t i = 0; i < nallocs; i++) {
ps[i] = gpr_arena_alloc(a, allocs[i]);
ps[i] = a->Alloc(allocs[i]);
// ensure the returned address is aligned
GPR_ASSERT(((intptr_t)ps[i] & 0xf) == 0);
// ensure no duplicate results
@ -63,7 +65,7 @@ static void test(const char* name, size_t init_size, const size_t* allocs,
// ensure writable
memset(ps[i], 1, allocs[i]);
}
gpr_arena_destroy(a);
a->Destroy();
gpr_free(ps);
}
@ -80,14 +82,14 @@ size_t concurrent_test_iterations() {
typedef struct {
gpr_event ev_start;
gpr_arena* arena;
Arena* arena;
} concurrent_test_args;
static void concurrent_test_body(void* arg) {
concurrent_test_args* a = static_cast<concurrent_test_args*>(arg);
gpr_event_wait(&a->ev_start, gpr_inf_future(GPR_CLOCK_REALTIME));
for (size_t i = 0; i < concurrent_test_iterations(); i++) {
*static_cast<char*>(gpr_arena_alloc(a->arena, 1)) = static_cast<char>(i);
*static_cast<char*>(a->arena->Alloc(1)) = static_cast<char>(i);
}
}
@ -96,7 +98,7 @@ static void concurrent_test(void) {
concurrent_test_args args;
gpr_event_init(&args.ev_start);
args.arena = gpr_arena_create(1024);
args.arena = Arena::Create(1024);
grpc_core::Thread thds[CONCURRENT_TEST_THREADS];
@ -112,7 +114,7 @@ static void concurrent_test(void) {
th.Join();
}
gpr_arena_destroy(args.arena);
args.arena->Destroy();
}
int main(int argc, char* argv[]) {

@ -19,40 +19,42 @@
/* Benchmark arenas */
#include <benchmark/benchmark.h>
#include "src/core/lib/gpr/arena.h"
#include "src/core/lib/gprpp/arena.h"
#include "test/cpp/microbenchmarks/helpers.h"
#include "test/cpp/util/test_config.h"
using grpc_core::Arena;
static void BM_Arena_NoOp(benchmark::State& state) {
while (state.KeepRunning()) {
gpr_arena_destroy(gpr_arena_create(state.range(0)));
Arena::Create(state.range(0))->Destroy();
}
}
BENCHMARK(BM_Arena_NoOp)->Range(1, 1024 * 1024);
static void BM_Arena_ManyAlloc(benchmark::State& state) {
gpr_arena* a = gpr_arena_create(state.range(0));
Arena* a = Arena::Create(state.range(0));
const size_t realloc_after =
1024 * 1024 * 1024 / ((state.range(1) + 15) & 0xffffff0u);
while (state.KeepRunning()) {
gpr_arena_alloc(a, state.range(1));
a->Alloc(state.range(1));
// periodically recreate arena to avoid OOM
if (state.iterations() % realloc_after == 0) {
gpr_arena_destroy(a);
a = gpr_arena_create(state.range(0));
a->Destroy();
a = Arena::Create(state.range(0));
}
}
gpr_arena_destroy(a);
a->Destroy();
}
BENCHMARK(BM_Arena_ManyAlloc)->Ranges({{1, 1024 * 1024}, {1, 32 * 1024}});
static void BM_Arena_Batch(benchmark::State& state) {
while (state.KeepRunning()) {
gpr_arena* a = gpr_arena_create(state.range(0));
Arena* a = Arena::Create(state.range(0));
for (int i = 0; i < state.range(1); i++) {
gpr_arena_alloc(a, state.range(2));
a->Alloc(state.range(2));
}
gpr_arena_destroy(a);
a->Destroy();
}
}
BENCHMARK(BM_Arena_Batch)->Ranges({{1, 64 * 1024}, {1, 64}, {1, 1024}});

@ -405,7 +405,7 @@ const char* name;
/* implementation of grpc_transport_init_stream */
int InitStream(grpc_transport* self, grpc_stream* stream,
grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) {
grpc_core::Arena* arena) {
return 0;
}
@ -540,7 +540,7 @@ static void BM_IsolatedFilter(benchmark::State& state) {
method,
start_time,
deadline,
gpr_arena_create(kArenaSize),
grpc_core::Arena::Create(kArenaSize),
nullptr};
while (state.KeepRunning()) {
GPR_TIMER_SCOPE("BenchmarkCycle", 0);
@ -552,11 +552,11 @@ static void BM_IsolatedFilter(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
// recreate arena every 64k iterations to avoid oom
if (0 == (state.iterations() & 0xffff)) {
gpr_arena_destroy(call_args.arena);
call_args.arena = gpr_arena_create(kArenaSize);
call_args.arena->Destroy();
call_args.arena = grpc_core::Arena::Create(kArenaSize);
}
}
gpr_arena_destroy(call_args.arena);
call_args.arena->Destroy();
grpc_channel_stack_destroy(channel_stack);
grpc_core::ExecCtx::Get()->Flush();

@ -458,7 +458,7 @@ static void BM_HpackParserParseHeader(benchmark::State& state) {
grpc_chttp2_hpack_parser p;
grpc_chttp2_hpack_parser_init(&p);
const int kArenaSize = 4096 * 4096;
p.on_header_user_data = gpr_arena_create(kArenaSize);
p.on_header_user_data = grpc_core::Arena::Create(kArenaSize);
p.on_header = OnHeader;
for (auto slice : init_slices) {
GPR_ASSERT(GRPC_ERROR_NONE == grpc_chttp2_hpack_parser_parse(&p, slice));
@ -470,12 +470,12 @@ static void BM_HpackParserParseHeader(benchmark::State& state) {
grpc_core::ExecCtx::Get()->Flush();
// Recreate arena every 4k iterations to avoid oom
if (0 == (state.iterations() & 0xfff)) {
gpr_arena_destroy((gpr_arena*)p.on_header_user_data);
p.on_header_user_data = gpr_arena_create(kArenaSize);
static_cast<grpc_core::Arena*>(p.on_header_user_data)->Destroy();
p.on_header_user_data = grpc_core::Arena::Create(kArenaSize);
}
}
// Clean up
gpr_arena_destroy((gpr_arena*)p.on_header_user_data);
static_cast<grpc_core::Arena*>(p.on_header_user_data)->Destroy();
for (auto slice : init_slices) grpc_slice_unref(slice);
for (auto slice : benchmark_slices) grpc_slice_unref(slice);
grpc_chttp2_hpack_parser_destroy(&p);
@ -778,7 +778,8 @@ static void free_timeout(void* p) { gpr_free(p); }
// Benchmark the current on_initial_header implementation
static void OnInitialHeader(void* user_data, grpc_mdelem md) {
// Setup for benchmark. This will bloat the absolute values of this benchmark
grpc_chttp2_incoming_metadata_buffer buffer((gpr_arena*)user_data);
grpc_chttp2_incoming_metadata_buffer buffer(
static_cast<grpc_core::Arena*>(user_data));
bool seen_error = false;
// Below here is the code we actually care about benchmarking

@ -193,13 +193,13 @@ class Stream {
Stream(Fixture* f) : f_(f) {
stream_size_ = grpc_transport_stream_size(f->transport());
stream_ = gpr_malloc(stream_size_);
arena_ = gpr_arena_create(4096);
arena_ = grpc_core::Arena::Create(4096);
}
~Stream() {
gpr_event_wait(&done_, gpr_inf_future(GPR_CLOCK_REALTIME));
gpr_free(stream_);
gpr_arena_destroy(arena_);
arena_->Destroy();
}
void Init(benchmark::State& state) {
@ -208,8 +208,8 @@ class Stream {
gpr_event_init(&done_);
memset(stream_, 0, stream_size_);
if ((state.iterations() & 0xffff) == 0) {
gpr_arena_destroy(arena_);
arena_ = gpr_arena_create(4096);
arena_->Destroy();
arena_ = grpc_core::Arena::Create(4096);
}
grpc_transport_init_stream(f_->transport(),
static_cast<grpc_stream*>(stream_), &refcount_,
@ -245,7 +245,7 @@ class Stream {
Fixture* f_;
grpc_stream_refcount refcount_;
gpr_arena* arena_;
grpc_core::Arena* arena_;
size_t stream_size_;
void* stream_;
grpc_closure* destroy_closure_ = nullptr;

@ -1082,6 +1082,7 @@ src/core/lib/gpr/tls_pthread.h \
src/core/lib/gpr/tmpfile.h \
src/core/lib/gpr/useful.h \
src/core/lib/gprpp/abstract.h \
src/core/lib/gprpp/arena.h \
src/core/lib/gprpp/atomic.h \
src/core/lib/gprpp/debug_location.h \
src/core/lib/gprpp/fork.h \

@ -1110,7 +1110,6 @@ src/core/lib/debug/trace.h \
src/core/lib/gpr/README.md \
src/core/lib/gpr/alloc.cc \
src/core/lib/gpr/alloc.h \
src/core/lib/gpr/arena.cc \
src/core/lib/gpr/arena.h \
src/core/lib/gpr/atm.cc \
src/core/lib/gpr/cpu_iphone.cc \
@ -1160,6 +1159,8 @@ src/core/lib/gpr/useful.h \
src/core/lib/gpr/wrap_memcpy.cc \
src/core/lib/gprpp/README.md \
src/core/lib/gprpp/abstract.h \
src/core/lib/gprpp/arena.cc \
src/core/lib/gprpp/arena.h \
src/core/lib/gprpp/atomic.h \
src/core/lib/gprpp/debug_location.h \
src/core/lib/gprpp/fork.cc \

@ -7982,7 +7982,6 @@
"name": "gpr_base",
"src": [
"src/core/lib/gpr/alloc.cc",
"src/core/lib/gpr/arena.cc",
"src/core/lib/gpr/atm.cc",
"src/core/lib/gpr/cpu_iphone.cc",
"src/core/lib/gpr/cpu_linux.cc",
@ -8015,6 +8014,7 @@
"src/core/lib/gpr/tmpfile_posix.cc",
"src/core/lib/gpr/tmpfile_windows.cc",
"src/core/lib/gpr/wrap_memcpy.cc",
"src/core/lib/gprpp/arena.cc",
"src/core/lib/gprpp/fork.cc",
"src/core/lib/gprpp/thd_posix.cc",
"src/core/lib/gprpp/thd_windows.cc",
@ -8063,6 +8063,7 @@
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.h",
"src/core/lib/gprpp/manual_constructor.h",
@ -8111,6 +8112,7 @@
"src/core/lib/gpr/tmpfile.h",
"src/core/lib/gpr/useful.h",
"src/core/lib/gprpp/abstract.h",
"src/core/lib/gprpp/arena.h",
"src/core/lib/gprpp/atomic.h",
"src/core/lib/gprpp/fork.h",
"src/core/lib/gprpp/manual_constructor.h",
